# repo: RKrahl/pytest-dependency | path: pytest_dependency.py | license: apache-2.0
"""$DOC"""
__version__ = "$VERSION"
import logging
import pytest
logger = logging.getLogger(__name__)
_automark = False
_ignore_unknown = False
def _get_bool(value):
"""Evaluate string representation of a boolean value.
"""
if value:
if value.lower() in ["0", "no", "n", "false", "f", "off"]:
return False
elif value.lower() in ["1", "yes", "y", "true", "t", "on"]:
return True
else:
raise ValueError("Invalid truth value '%s'" % value)
else:
return False
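# Illustrative behaviour of _get_bool(): "Yes", "on" or "1" map to True,
# "No", "off" or "0" map to False, the empty string maps to False, and any
# other non-empty string raises ValueError.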
class DependencyItemStatus(object):
"""Status of a test item in a dependency manager.
"""
Phases = ('setup', 'call', 'teardown')
def __init__(self):
self.results = { w:None for w in self.Phases }
def __str__(self):
l = ["%s: %s" % (w, self.results[w]) for w in self.Phases]
return "Status(%s)" % ", ".join(l)
def addResult(self, rep):
self.results[rep.when] = rep.outcome
def isSuccess(self):
return list(self.results.values()) == ['passed', 'passed', 'passed']
class DependencyManager(object):
"""Dependency manager, stores the results of tests.
"""
ScopeCls = {
'session': pytest.Session,
'package': pytest.Package,
'module': pytest.Module,
'class': pytest.Class,
}
@classmethod
def getManager(cls, item, scope):
"""Get the DependencyManager object from the node at scope level.
Create it, if not yet present.
"""
node = item.getparent(cls.ScopeCls[scope])
if not node:
return None
if not hasattr(node, 'dependencyManager'):
node.dependencyManager = cls(scope)
return node.dependencyManager
def __init__(self, scope):
self.results = {}
self.scope = scope
def addResult(self, item, name, rep):
if not name:
# Old versions of pytest used to add an extra "::()" to
# the node ids of class methods to denote the class
# instance. This has been removed in pytest 4.0.0.
nodeid = item.nodeid.replace("::()::", "::")
if self.scope == 'session' or self.scope == 'package':
name = nodeid
elif self.scope == 'module':
name = nodeid.split("::", 1)[1]
elif self.scope == 'class':
name = nodeid.split("::", 2)[2]
else:
raise RuntimeError("Internal error: invalid scope '%s'"
% self.scope)
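        # Illustrative example: for a nodeid like
        # "tests/test_mod.py::TestClass::test_method", the registered name is
        # the full nodeid at session/package scope, "TestClass::test_method"
        # at module scope, and "test_method" at class scope.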
status = self.results.setdefault(name, DependencyItemStatus())
logger.debug("register %s %s %s in %s scope",
rep.when, name, rep.outcome, self.scope)
status.addResult(rep)
def checkDepend(self, depends, item):
logger.debug("check dependencies of %s in %s scope ...",
item.name, self.scope)
for i in depends:
if i in self.results:
if self.results[i].isSuccess():
logger.debug("... %s succeeded", i)
continue
else:
logger.debug("... %s has not succeeded", i)
else:
logger.debug("... %s is unknown", i)
if _ignore_unknown:
continue
logger.info("skip %s because it depends on %s", item.name, i)
pytest.skip("%s depends on %s" % (item.name, i))
def depends(request, other, scope='module'):
"""Add dependency on other test.
Call pytest.skip() unless a successful outcome of all of the tests in
other has been registered previously. This has the same effect as
the `depends` keyword argument to the :func:`pytest.mark.dependency`
marker. In contrast to the marker, this function may be called at
runtime during a test.
:param request: the value of the `request` pytest fixture related
to the current test.
:param other: dependencies, a list of names of tests that this
test depends on. The names of the dependencies must be
adapted to the scope.
:type other: iterable of :class:`str`
:param scope: the scope to search for the dependencies. Must be
either `'session'`, `'package'`, `'module'`, or `'class'`.
:type scope: :class:`str`
.. versionadded:: 0.2
.. versionchanged:: 0.5.0
the scope parameter has been added.
"""
item = request.node
manager = DependencyManager.getManager(item, scope=scope)
manager.checkDepend(other, item)
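# A minimal usage sketch (hypothetical test module, not part of this file),
# assuming "from pytest_dependency import depends":
#
#     @pytest.mark.dependency()
#     def test_a():
#         assert True
#
#     @pytest.mark.dependency()
#     def test_b(request):
#         depends(request, ["test_a"])  # runtime equivalent of depends=["test_a"]
#         assert True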
def pytest_addoption(parser):
parser.addini("automark_dependency",
"Add the dependency marker to all tests automatically",
default=False)
parser.addoption("--ignore-unknown-dependency",
action="store_true", default=False,
help="ignore dependencies whose outcome is not known")
def pytest_configure(config):
global _automark, _ignore_unknown
_automark = _get_bool(config.getini("automark_dependency"))
_ignore_unknown = config.getoption("--ignore-unknown-dependency")
config.addinivalue_line("markers",
"dependency(name=None, depends=[]): "
"mark a test to be used as a dependency for "
"other tests or to depend on other tests.")
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Store the test outcome if this item is marked "dependency".
"""
outcome = yield
marker = item.get_closest_marker("dependency")
if marker is not None or _automark:
rep = outcome.get_result()
name = marker.kwargs.get('name') if marker is not None else None
for scope in DependencyManager.ScopeCls:
manager = DependencyManager.getManager(item, scope=scope)
            if manager:
manager.addResult(item, name, rep)
def pytest_runtest_setup(item):
"""Check dependencies if this item is marked "dependency".
Skip if any of the dependencies has not been run successfully.
"""
marker = item.get_closest_marker("dependency")
if marker is not None:
depends = marker.kwargs.get('depends')
if depends:
scope = marker.kwargs.get('scope', 'module')
manager = DependencyManager.getManager(item, scope=scope)
manager.checkDepend(depends, item)

# repo: trondhindenes/ansible | path: lib/ansible/plugins/action/aruba.py | license: gpl-3.0
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.aruba.aruba import aruba_provider_spec
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(aruba_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'aruba'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
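        # Illustrative example: a prompt such as "switch(config)#" ends in ")#",
        # which indicates configuration mode, so sending "exit" drops back to
        # enable mode ("switch#") before any module runs.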
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(task_vars=task_vars)
return result

#!/usr/bin/env python
# repo: yfu/tools | path: circ/find_circ/merge_bed.py | license: gpl-3.0
import sys,os
from collections import defaultdict
def read_to_hash(fname,ds=0,de=0,flank=0,cover=False):
#print "loading",fname
pos = {}
for line in file(fname):
if line.startswith("#"):
continue
line = line.strip()
chrom,start,end,name,score,sense = line.split('\t')[:6]
start,end = int(start)+ds,int(end)+de
#print (chrom,start,end,sense)
pos[(chrom,start,end,sense)] = line
if flank:
for x in xrange(flank):
pos[(chrom,start-x,end,sense)] = line
pos[(chrom,start+x,end,sense)] = line
pos[(chrom,start,end-x,sense)] = line
pos[(chrom,start,end+x,sense)] = line
#if cover:
#for x in xrange
return pos
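# Illustrative example (hypothetical BED line): "chr1\t100\t200\tcirc_1\t5\t+"
# is keyed as ("chr1", 100 + ds, 200 + de, "+"). With flank > 0, shifted copies
# of the start and end coordinates are registered as well, so junctions that
# differ by a few bases between the two inputs can still match.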
N = defaultdict(int)
anna = read_to_hash(sys.argv[1],flank=0)
N['unique_input1'] = len(anna)
#print len(anna.keys())
marv = read_to_hash(sys.argv[2])
N['unique_input2'] = len(marv)
#print len(marv.keys())
for circ,line in marv.items():
if circ in anna:
if len(sys.argv) > 3:
print "%s\t%s" % (anna[circ].split('\t')[3],line.split('\t')[3])
else:
print anna[circ]
#print "M",line
N['overlap'] += 1
del anna[circ]
else:
N['input2_not_in_input1'] += 1
#print len(anna.keys())
for k,l in anna.items():
#if "HEK" in l:
print "MISSING\t%s" % l
N['input1_not_in_input2'] += 1
for k in sorted(N.keys()):
sys.stderr.write("%s\t%d\n" % (k,N[k]))
found = N['overlap']
detected = N['unique_input2']
total = N['unique_input1']
fp = N['input2_not_in_input1']
#print "#sensitivity %d/%d = %.2f %%" % (found,total,float(found)/total*100)
#print "#FDR %d/%d = %.2f %%" % (fp,detected,float(fp)/detected*100) | gpl-3.0 | 448,779,059,131,171,500 | 27.106061 | 76 | 0.539914 | false |

# repo: incnone/necrobot | path: necrobot/race/publicrace/raceroom.py | license: mit
# A necrobot "casual" race room.
import asyncio
import datetime
import discord
from necrobot.botbase.botchannel import BotChannel
from necrobot.botbase.necrobot import Necrobot
from necrobot.config import Config
from necrobot.race import cmd_race
from necrobot.race import raceinfo
from necrobot.race.publicrace import cmd_publicrace
from necrobot.race.race import Race, RaceEvent
from necrobot.test import cmd_test
from necrobot.util import server
from necrobot.util import strutil
from necrobot.race import racedb
class RaceRoom(BotChannel):
def __init__(self, race_discord_channel, race_info):
BotChannel.__init__(self)
        self._channel = race_discord_channel    # The discord channel in which this race is taking place
self._race_info = race_info # The type of races to be run in this room
self._current_race = None # The current race
self._previous_race = None # The previous race
self._race_number = 0 # The number of races we've done
self._mention_on_new_race = [] # A list of users that should be @mentioned when a rematch is created
self._mentioned_users = [] # A list of users that were @mentioned when this race was created
self._nopoke = False # When True, the .poke command fails
self.channel_commands = [
cmd_race.Enter(self),
cmd_race.Unenter(self),
cmd_race.Ready(self),
cmd_race.Unready(self),
cmd_race.Done(self),
cmd_race.Undone(self),
cmd_race.Forfeit(self),
cmd_race.Unforfeit(self),
cmd_race.Comment(self),
cmd_race.Death(self),
cmd_race.Igt(self),
cmd_race.Time(self),
cmd_race.ForceForfeit(self),
cmd_race.ForceForfeitAll(self),
cmd_race.Pause(self),
cmd_race.Unpause(self),
cmd_race.Reseed(self),
cmd_race.ChangeRules(self),
cmd_publicrace.Rematch(self),
cmd_publicrace.Kick(self),
cmd_publicrace.Notify(self),
cmd_publicrace.Unnotify(self),
cmd_publicrace.Missing(self),
cmd_publicrace.Shame(self),
cmd_publicrace.Poke(self),
cmd_publicrace.ForceCancel(self),
cmd_publicrace.ForceClose(self),
cmd_test.TestRace(self),
]
# Properties ------------------------------
@property
def channel(self):
return self._channel
# The currently active race. Is not None.
@property
def current_race(self):
return self._current_race
# A string to add to the race details (used for private races; empty in base class)
@property
def format_rider(self):
return ''
# The most recent race to begin, or None if no such
@property
def last_begun_race(self):
if not self._current_race.before_race:
return self._current_race
else:
return self._previous_race
@property
def mentioned_users(self):
return self._mentioned_users
@property
def race_info(self):
return self._race_info
@property
def results_channel(self):
return server.find_channel(channel_name=Config.RACE_RESULTS_CHANNEL_NAME)
# Returns the string to go in the topic for the leaderboard
@property
def leaderboard(self):
new_leaderboard = '``` \n' + strutil.tickless(self._leaderboard_header(self.current_race)) \
+ self.current_race.status_str + '\n'
new_leaderboard += 'Entrants:\n'
new_leaderboard += strutil.tickless(self.current_race.leaderboard_text)
new_leaderboard += '```'
return new_leaderboard
# Returns 'header' text for the race, giving info about the rules etc.
def _leaderboard_header(self, race: Race):
room_rider = self.format_rider
if room_rider:
room_rider = ' ' + room_rider
seed_str = race.race_info.seed_str
if seed_str:
seed_str = '\n' + seed_str
return race.race_info.format_str + room_rider + seed_str + '\n'
# Methods -------------------------------------------------------------
# Notifies the given user on a rematch
def notify(self, user: discord.Member):
if user not in self._mention_on_new_race:
self._mention_on_new_race.append(user)
# Removes notifications for the given user on rematch
def dont_notify(self, user: discord.Member):
self._mention_on_new_race = [u for u in self._mention_on_new_race if u != user]
def refresh(self, channel: discord.TextChannel):
self._channel = channel
# Coroutine methods ---------------------------------------------------
# Set up the leaderboard etc. Should be called after creation; code not put into __init__ b/c coroutine
async def initialize(self):
asyncio.ensure_future(self._monitor_for_cleanup())
await self._make_new_race()
await self.write('Enter the race with `.enter`, and type `.ready` when ready. '
'Finish the race with `.done` or `.forfeit`. Use `.help` for a command list.')
# Write text to the raceroom. Return a Message for the text written
async def write(self, text: str):
await self._channel.send(text)
# Processes a race event
async def process(self, race_event: RaceEvent):
if race_event.event == RaceEvent.EventType.RACE_END:
await asyncio.sleep(1) # Waiting for a short time feels good UI-wise
await self.write(
'The race is over. Results will be recorded in {} seconds. Until then, you may comment with '
'`.comment` or add an in-game-time with `.igt`.'.format(
self.current_race.race_config.finalize_time_sec))
elif race_event.event == RaceEvent.EventType.RACE_FINALIZE:
await racedb.record_race(race_event.race)
if race_event.race.race_info.post_results:
await self.post_result(race_event.race)
elif race_event.event == RaceEvent.EventType.RACE_CANCEL:
await self.write('The race has been canceled.')
await self.update()
elif race_event.event == RaceEvent.EventType.RACER_ENTER:
self.notify(race_event.racer_member)
elif race_event.event == RaceEvent.EventType.RACER_UNENTER:
self.dont_notify(race_event.racer_member)
else:
await self.update()
# Updates the leaderboard
async def update(self):
pass
# await self._channel.edit(topic=self.leaderboard)
# Post the race result to the race necrobot
async def post_result(self, race: Race):
await self.results_channel.send(
'Race begun at {0}:\n```\n{1}{2}\n```'.format(
race.start_datetime.strftime("%d %B %Y, UTC %H:%M"),
strutil.tickless(self._leaderboard_header(race)),
strutil.tickless(race.leaderboard_text)
)
)
# Commands ------------------------------------------------------------
async def set_post_result(self, do_post: bool):
self._race_info.post_results = do_post
if self.current_race.before_race:
self.current_race.race_info = raceinfo.RaceInfo.copy(self._race_info)
if do_post:
await self.write('Races in this channel will have their results posted to the results channel.')
else:
await self.write('Races in this channel will not have their results posted to the results channel.')
# Change the RaceInfo for this room
async def change_race_info(self, command_args: list):
new_race_info = raceinfo.parse_args_modify(command_args, raceinfo.RaceInfo.copy(self._race_info))
if new_race_info:
self._race_info = new_race_info
if self.current_race.before_race:
self.current_race.race_info = raceinfo.RaceInfo.copy(self._race_info)
await self.write('Changed rules for the next race.')
await self.update()
# Close the channel.
async def close(self):
Necrobot().unregister_bot_channel(self._channel)
await self._channel.delete()
# Makes a rematch of this race if the current race is finished
async def make_rematch(self):
if self._current_race.complete:
await self._make_new_race()
# Alerts unready users
async def poke(self):
if self._nopoke or not self._current_race or not self._current_race.before_race:
return
ready_racers = []
unready_racers = []
for racer in self._current_race.racers:
if racer.is_ready:
ready_racers.append(racer)
else:
unready_racers.append(racer)
num_unready = len(unready_racers)
quorum = (num_unready == 1) or (3*num_unready <= len(ready_racers))
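        # Illustrative example: with 7 ready racers and 2 unready ones,
        # 3 * 2 <= 7 holds, so the poke goes out; a single unready racer
        # can always be poked once at least one racer is ready.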
if ready_racers and quorum:
self._nopoke = True
alert_string = ''
for racer in unready_racers:
alert_string += racer.member.mention + ', '
await self.write('Poking {0}.'.format(alert_string[:-2]))
asyncio.ensure_future(self._run_nopoke_delay())
# Private -----------------------------------------------------------------
# Makes a new Race (and stores the previous one in self._previous race)
async def _make_new_race(self):
# Make the race
self._race_number += 1
self._previous_race = self._current_race
self._current_race = Race(self, self.race_info)
await self._current_race.initialize()
await self.update()
# Send @mention message
self._mentioned_users = []
mention_text = ''
for user in self._mention_on_new_race:
mention_text += user.mention + ' '
self._mentioned_users.append(user)
self._mention_on_new_race = []
if self.race_info.seeded:
await self._channel.send(
'{0}\nRace number {1} is open for entry. Seed: {2}.'.format(
mention_text, self._race_number, self.current_race.race_info.seed))
else:
await self._channel.send(
'{0}\nRace number {1} is open for entry.'.format(mention_text, self._race_number))
# Checks to see whether the room should be cleaned.
async def _monitor_for_cleanup(self):
while True:
await asyncio.sleep(30) # Wait between check times
# No race object
if self._current_race is None:
await self.close()
return
# Pre-race
elif self._current_race.before_race:
if not self._current_race.any_entrants:
if self._current_race.passed_no_entrants_cleanup_time:
await self.close()
return
elif self._current_race.passed_no_entrants_warning_time:
await self.write('Warning: Race has had zero entrants for some time and will be closed soon.')
# Post-race
elif self._current_race.complete:
async for msg in self._channel.history(limit=1):
if (datetime.datetime.utcnow() - msg.created_at) > Config.CLEANUP_TIME:
await self.close()
return
# Implements a delay before pokes can happen again
async def _run_nopoke_delay(self):
await asyncio.sleep(Config.RACE_POKE_DELAY)
self._nopoke = False

# repo: morphis/home-assistant | path: homeassistant/util/temperature.py | license: apache-2.0
"""Temperature util functions."""
from homeassistant.const import (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_NOT_RECOGNIZED_TEMPLATE,
TEMPERATURE
)
def fahrenheit_to_celsius(fahrenheit: float) -> float:
"""Convert a Fahrenheit temperature to Celsius."""
return (fahrenheit - 32.0) / 1.8
def celsius_to_fahrenheit(celsius: float) -> float:
"""Convert a Celsius temperature to Fahrenheit."""
return celsius * 1.8 + 32.0
def convert(temperature: float, from_unit: str, to_unit: str) -> float:
"""Convert a temperature from one unit to another."""
if from_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(from_unit,
TEMPERATURE))
if to_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(to_unit,
TEMPERATURE))
if from_unit == to_unit:
return temperature
elif from_unit == TEMP_CELSIUS:
return celsius_to_fahrenheit(temperature)
else:
return fahrenheit_to_celsius(temperature)
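# A quick sanity check of the conversions (illustrative values):
#     convert(100, TEMP_CELSIUS, TEMP_FAHRENHEIT)  # -> 212.0
#     convert(32, TEMP_FAHRENHEIT, TEMP_CELSIUS)   # -> 0.0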

# repo: hoverinc/three.js | path: utils/converters/fbx/convert_to_threejs.py
# @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
option_triangulate = True
option_textures = True
option_prefix = True
option_geometry = False
option_default_camera = False
option_default_light = False
option_pretty_print = False
converter = None
inputFolder = ""
outputFolder = ""
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
def __init__(self, value, separator = ','):
self.separator = separator
self.value = value
def encode(self):
if not self.value:
return None
return '[ %s ]' % self.separator.join(str(f) for f in self.value)
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
def __init__(self, value, chunk_size = 15, force_rounding = False):
self.value = value
self.size = chunk_size
self.force_rounding = force_rounding
def encode(self):
        # Turn the flat array into an array of chunks of length chunk_size,
        # join the values in each chunk with ', ' (rounding them if requested),
        # and prepend '{CHUNK}' so the strings can be found with a regex later.
if not self.value:
return None
if self.force_rounding:
return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
else:
return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
return obj.encode()
else:
return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
# turn strings of arrays into arrays (remove the double quotes)
output_string = re.sub(':\s*\"(\[.*\])\"', r': \1', output_string)
output_string = re.sub('(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
output_string = re.sub('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
# replace '0metadata' with metadata
output_string = re.sub('0metadata', r'metadata', output_string)
# replace 'zchildren' with children
output_string = re.sub('zchildren', r'children', output_string)
# add an extra newline after '"children": {'
output_string = re.sub('(children.*{\s*\n)', r'\1\n', output_string)
# add an extra newline after '},'
output_string = re.sub('},\s*\n', r'},\n\n', output_string)
# add an extra newline after '\n\s*],'
output_string = re.sub('(\n\s*)],\s*\n', r'\1],\n\n', output_string)
return output_string
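# Taken together (illustrative): CustomEncoder serializes NoIndent/ChunkedIndent
# values into strings such as '"[ 1, 2, 3 ]"' or '"{CHUNK}0.1, 0.2, ..."', and
# the regex passes above strip the surrounding quotes and '{CHUNK}' markers so
# the final JSON contains real inline arrays instead of one element per line.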
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5))
if option_pretty_print:
return NoIndent([v[0], v[1]], ', ')
else:
return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2]], ', ')
else:
return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if math.isnan(v[3]) or math.isinf(v[3]):
v[3] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2], v[3]], ', ')
else:
return [v[0], v[1], v[2], v[3]]
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
def getHex(c):
color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
return int(color)
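# Illustrative example: getHex((1.0, 0.0, 0.0)) == 0xff0000 == 16711680.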
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_uvs(uv_layers):
layers = []
for uvs in uv_layers:
tmp = []
for uv in uvs:
tmp.append(uv[0])
tmp.append(uv[1])
if option_pretty_print:
layer = ChunkedIndent(tmp)
else:
layer = tmp
layers.append(layer)
return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
scene = o.GetScene()
object_name = o.GetName()
object_id = o.GetUniqueID()
object_count = scene.GetSrcObjectCount(class_id)
for i in range(object_count):
other = scene.GetSrcObject(class_id, i)
other_id = other.GetUniqueID()
other_name = other.GetName()
if other_id == object_id:
continue
if other_name == object_name:
return False
return True
def getObjectName(o, force_prefix = False):
if not o:
return ""
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxNode.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Object_%s_" % object_id
return prefix + object_name
def getMaterialName(o, force_prefix = False):
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Material_%s_" % object_id
return prefix + object_name
def getTextureName(t, force_prefix = False):
if type(t) is FbxFileTexture:
texture_file = t.GetFileName()
texture_id = os.path.splitext(os.path.basename(texture_file))[0]
else:
texture_id = t.GetName()
if texture_id == "_empty_":
texture_id = ""
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % t.GetUniqueID()
if len(texture_id) == 0:
prefix = prefix[0:len(prefix)-1]
return prefix + texture_id
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
texture_name = os.path.splitext(texture_name)[0]
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % texture_id
return prefix + texture_name
def getPrefixedName(o, prefix):
return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
node_attribute = node.GetNodeAttribute();
if node_attribute:
if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
converter.TriangulateInPlace(node);
child_count = node.GetChildCount()
for i in range(child_count):
triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
# FBX to Three.js texture types
binding_types = {
"DiffuseColor": "map",
"DiffuseFactor": "diffuseFactor",
"EmissiveColor": "emissiveMap",
"EmissiveFactor": "emissiveFactor",
"AmbientColor": "ambientMap",
"AmbientFactor": "ambientFactor",
"SpecularColor": "specularMap",
"SpecularFactor": "specularFactor",
"ShininessExponent": "shininessExponent",
"NormalMap": "normalMap",
"Bump": "bumpMap",
"TransparentColor": "transparentMap",
"TransparencyFactor": "transparentFactor",
"ReflectionColor": "reflectionMap",
"ReflectionFactor": "reflectionFactor",
"DisplacementColor": "displacementMap",
"VectorDisplacementColor": "vectorDisplacementMap"
}
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
#Get the implementation to see if it's a hardware shader.
implementation = GetImplementation(material, "ImplementationHLSL")
implementation_type = "HLSL"
if not implementation:
implementation = GetImplementation(material, "ImplementationCGFX")
implementation_type = "CGFX"
output = None
material_params = None
material_type = None
if implementation:
print("Shader materials are not supported")
elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
specular = getHex(material.Specular.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
shininess = material.Shininess.Get()
transparent = False
reflectivity = 1
bumpScale = 1
material_type = 'MeshPhongMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'specular' : specular,
'shininess' : shininess,
'bumpScale' : bumpScale,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
else:
print "Unknown type of Material", getMaterialName(material)
# default to Lambert Material if the current Material type cannot be handeled
if not material_type:
ambient = getHex((0,0,0))
diffuse = getHex((0.5,0.5,0.5))
emissive = getHex((0,0,0))
opacity = 1
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
if option_textures:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
generate_texture_bindings(material_property, material_params)
material_params['wireframe'] = False
material_params['wireframeLinewidth'] = 1
output = {
'type' : material_type,
'parameters' : material_params
}
return output
def generate_proxy_material_object(node, material_names):
material_type = 'MeshFaceMaterial'
material_params = {
'materials' : material_names
}
output = {
'type' : material_type,
'parameters' : material_params
}
return output
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
node = None
if mesh:
node = mesh.GetNode()
if node:
material_count = node.GetMaterialCount()
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append(getMaterialName(material))
if material_count > 1:
proxy_material = generate_proxy_material_object(node, material_names)
proxy_name = getMaterialName(node, True)
material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_materials_from_node(node, material_dict)
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
def generate_material_dict(scene):
material_dict = {}
# generate all materials for this scene
material_count = scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for i in range(material_count):
material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, i)
material_object = generate_material_object(material)
material_name = getMaterialName(material)
material_dict[material_name] = material_object
    # generate material proxies
    # Three.js does not support meshes with multiple materials; however, it does
    # support materials with multiple submaterials
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
#TODO: extract more texture properties
wrap_u = texture.GetWrapModeU()
wrap_v = texture.GetWrapModeV()
offset = texture.GetUVTranslation()
if type(texture) is FbxFileTexture:
url = texture.GetFileName()
else:
url = getTextureName( texture )
url = replace_inFolder2OutFolder( url )
output = {
'url': url,
'repeat': serializeVector2( (1,1) ),
'offset': serializeVector2( texture.GetUVTranslation() ),
'magFilter': 'LinearFilter',
'minFilter': 'LinearMipMapLinearFilter',
'anisotropy': True
}
return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
folderIndex = url.find(inputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(inputFolder): ]
url = outputFolder + url
return url
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
folderIndex = url.find(outputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(outputFolder): ]
url = inputFolder + url
return url
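# Illustrative example (hypothetical folders): with inputFolder "assets/fbx" and
# outputFolder "assets/json", replace_inFolder2OutFolder("assets/fbx/tex/wood.png")
# returns "assets/json/tex/wood.png"; replace_OutFolder2inFolder reverses this.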
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
#for all materials attached to this mesh
material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for material_index in range(material_count):
material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
#go through all the possible textures types
if material:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
extract_material_textures(material_property, texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_textures_from_node(node, texture_dict)
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
def generate_texture_dict(scene):
if not option_textures:
return {}
texture_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
control_points_count = mesh.GetControlPointsCount()
control_points = mesh.GetControlPoints()
positions = []
for i in range(control_points_count):
tmp = control_points[i]
tmp = [tmp[0], tmp[1], tmp[2]]
positions.append(tmp)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
for i in range(len(positions)):
v = positions[i]
position = FbxVector4(v[0], v[1], v[2])
position = transform.MultNormalize(position)
positions[i] = [position[0], position[1], position[2]]
return positions
def extract_fbx_vertex_normals(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_normal_indices = []
layered_normal_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_normals = mesh.GetLayer(l).GetNormals()
if not mesh_normals:
continue
normals_array = mesh_normals.GetDirectArray()
normals_count = normals_array.GetCount()
if normals_count == 0:
continue
normal_indices = []
normal_values = []
# values
for i in range(normals_count):
normal = normals_array.GetAt(i)
normal = [normal[0], normal[1], normal[2]]
normal_values.append(normal)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
t = FbxVector4(0,0,0,1)
transform.SetRow(3, t)
for i in range(len(normal_values)):
n = normal_values[i]
normal = FbxVector4(n[0], n[1], n[2])
normal = transform.MultNormalize(normal)
normal.Normalize()
normal = [normal[0], normal[1], normal[2]]
normal_values[i] = normal
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_normals = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
# mapping mode is by control points. The mesh should be smooth and soft.
# we can get normals by retrieving each control point
if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
# reference mode is direct, the normal index is same as vertex index.
# get normals by the index of control vertex
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(control_point_index)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(control_point_index)
poly_normals.append(index)
# mapping mode is by polygon-vertex.
# we can get normals by retrieving polygon-vertex.
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(vertexId)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(vertexId)
poly_normals.append(index)
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported normal mapping mode for polygon vertex")
vertexId += 1
normal_indices.append(poly_normals)
layered_normal_values.append(normal_values)
layered_normal_indices.append(normal_indices)
normal_values = []
normal_indices = []
# Three.js only supports one layer of normals
if len(layered_normal_values) > 0:
normal_values = layered_normal_values[0]
normal_indices = layered_normal_indices[0]
return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_color_indices = []
layered_color_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_colors = mesh.GetLayer(l).GetVertexColors()
if not mesh_colors:
continue
colors_array = mesh_colors.GetDirectArray()
colors_count = colors_array.GetCount()
if colors_count == 0:
continue
color_indices = []
color_values = []
# values
for i in range(colors_count):
color = colors_array.GetAt(i)
color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
color_values.append(color)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_colors = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(control_point_index)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(control_point_index)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(vertexId)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(vertexId)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported color mapping mode for polygon vertex")
vertexId += 1
color_indices.append(poly_colors)
layered_color_indices.append( color_indices )
layered_color_values.append( color_values )
color_values = []
color_indices = []
# Three.js only supports one layer of colors
if len(layered_color_values) > 0:
color_values = layered_color_values[0]
color_indices = layered_color_indices[0]
'''
# The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
# This causes most models to receive incorrect vertex colors
if len(color_values) == 0:
color = mesh.Color.Get()
color_values = [[color[0], color[1], color[2]]]
color_indices = []
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
color_indices.append([0] * poly_size)
'''
return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_uv_indices = []
layered_uv_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_uvs = mesh.GetLayer(l).GetUVs()
if not mesh_uvs:
continue
uvs_array = mesh_uvs.GetDirectArray()
uvs_count = uvs_array.GetCount()
if uvs_count == 0:
continue
uv_indices = []
uv_values = []
# values
for i in range(uvs_count):
uv = uvs_array.GetAt(i)
uv = [uv[0], uv[1]]
uv_values.append(uv)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_uvs = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
poly_uvs.append(control_point_index)
elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
poly_uvs.append(index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
poly_uvs.append(uv_texture_index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported uv mapping mode for polygon vertex")
vertexId += 1
uv_indices.append(poly_uvs)
layered_uv_values.append(uv_values)
layered_uv_indices.append(uv_indices)
return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
def generate_color_key(color):
return getHex(color)
def generate_uv_key(uv):
return (round(uv[0], 6), round(uv[1], 6))
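# Illustrative example: generate_uv_key((0.1234567, 0.5)) == (0.123457, 0.5), so
# UV pairs that agree to six decimal places collapse to a single dictionary entry.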
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
source_layer_count = len(source_uvs)
for layer_index in range(source_layer_count):
dest_layer_count = len(dest_uvs)
if dest_layer_count <= layer_index:
dest_uv_layer = {}
count = 0
dest_uvs.append(dest_uv_layer)
counts.append(count)
else:
dest_uv_layer = dest_uvs[layer_index]
count = counts[layer_index]
source_uv_layer = source_uvs[layer_index]
for uv in source_uv_layer:
key = generate_uv_key(uv)
if key not in dest_uv_layer:
dest_uv_layer[key] = count
count += 1
counts[layer_index] = count
return counts
def generate_unique_normals_dictionary(mesh_list):
normals_dictionary = {}
nnormals = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
node = mesh.GetNode()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
if len(normal_values) > 0:
for normal in normal_values:
key = generate_normal_key(normal)
if key not in normals_dictionary:
normals_dictionary[key] = nnormals
nnormals += 1
return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
colors_dictionary = {}
ncolors = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
color_values, color_indices = extract_fbx_vertex_colors(mesh)
if len(color_values) > 0:
for color in color_values:
key = generate_color_key(color)
if key not in colors_dictionary:
colors_dictionary[key] = ncolors
ncolors += 1
return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
uvs_dictionary_layers = []
nuvs_list = []
# Merge meshes, remove duplicate data
for mesh in mesh_list:
uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
if len(uv_values) > 0:
nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
normal_values = []
for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
normal_values.append(key)
return normal_values
def generate_colors_from_dictionary(colors_dictionary):
color_values = []
for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
color_values.append(key)
return color_values
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
uv_values = []
for uvs_dictionary in uvs_dictionary_layers:
uv_values_layer = []
for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
uv_values_layer.append(key)
uv_values.append(uv_values_layer)
return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
if len(mesh_uv_indices) <= 0:
return []
poly_uv_indices = mesh_uv_indices[poly_index]
poly_size = len(poly_uv_indices)
output_poly_uv_indices = []
for v in range(poly_size):
uv_index = poly_uv_indices[v]
uv_value = mesh_uv_values[uv_index]
key = generate_uv_key(uv_value)
output_index = uvs_to_indices[key]
output_poly_uv_indices.append(output_index)
return output_poly_uv_indices
def process_mesh_vertices(mesh_list):
vertex_offset = 0
vertex_offset_list = [0]
vertices = []
for mesh in mesh_list:
node = mesh.GetNode()
mesh_vertices = extract_fbx_vertex_positions(mesh)
vertices.extend(mesh_vertices[:])
vertex_offset += len(mesh_vertices)
vertex_offset_list.append(vertex_offset)
return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
material_offset = 0
material_offset_list = [0]
materials_list = []
#TODO: remove duplicate mesh references
for mesh in mesh_list:
node = mesh.GetNode()
material_count = node.GetMaterialCount()
if material_count > 0:
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
materials_list.append( material )
material_offset += material_count
material_offset_list.append(material_offset)
return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
faces = []
for mesh_index in range(len(mesh_list)):
mesh = mesh_list[mesh_index]
flipWindingOrder = False
node = mesh.GetNode()
if node:
local_scale = node.EvaluateLocalScaling()
if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
flipWindingOrder = True
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
color_values, color_indices = extract_fbx_vertex_colors(mesh)
uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
for poly_index in range(poly_count):
poly_size = mesh.GetPolygonSize(poly_index)
face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
face_uv_layers = []
for l in range(len(uv_indices_layers)):
uv_values = uv_values_layers[l]
uv_indices = uv_indices_layers[l]
face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
face_uv_layers.append(face_uv_indices)
face_vertices = []
for vertex_index in range(poly_size):
control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
face_vertices.append(control_point_index)
#TODO: assign a default material to any mesh without one
if len(material_offset_list) <= mesh_index:
material_offset = 0
else:
material_offset = material_offset_list[mesh_index]
vertex_offset = vertex_offset_list[mesh_index]
if poly_size > 4:
new_face_normals = []
new_face_colors = []
new_face_uv_layers = []
for i in range(poly_size - 2):
new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
if len(face_normals):
new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
if len(face_colors):
new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
if len(face_uv_layers):
new_face_uv_layers = []
for layer in face_uv_layers:
new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
face = generate_mesh_face(mesh,
poly_index,
new_face_vertices,
new_face_normals,
new_face_colors,
new_face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
else:
face = generate_mesh_face(mesh,
poly_index,
face_vertices,
face_normals,
face_colors,
face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
return faces
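# Note (added for clarity): polygons with more than 4 vertices are fan
# triangulated above; [v0, v1, v2, v3, v4] becomes the poly_size - 2 faces
# (v0, v1, v2), (v0, v2, v3), (v0, v3, v4), all sharing the first vertex.
# This is only correct for convex, roughly planar polygons.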
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
isTriangle = ( len(vertex_indices) == 3 )
nVertices = 3 if isTriangle else 4
hasMaterial = False
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
hasMaterial = True
break
hasFaceUvs = False
hasFaceVertexUvs = len(uv_layers) > 0
hasFaceNormals = False
hasFaceVertexNormals = len(normals) > 0
hasFaceColors = False
hasFaceVertexColors = len(colors) > 0
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
faceData.append(faceType)
if flipOrder:
if nVertices == 3:
vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
else:
vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[3], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[3], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
                    tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
for i in range(nVertices):
index = vertex_indices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
material_id = 0
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
material_id = materials.GetIndexArray().GetAt(polygon_index)
break
material_id += material_offset
faceData.append( material_id )
if hasFaceVertexUvs:
for polygon_uvs in uv_layers:
for i in range(nVertices):
index = polygon_uvs[i]
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = normals[i]
faceData.append(index)
if hasFaceVertexColors:
for i in range(nVertices):
index = colors[i]
faceData.append(index)
return faceData
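# Worked example (illustrative, assuming the JSONLoader v3 bit layout used
# above): a triangle with a material and per-vertex normals sets bits 1 and 5,
# so faceType == (1 << 1) | (1 << 5) == 34 and faceData is laid out as
# [34, v0, v1, v2, material_id, n0, n1, n2].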
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
mesh = node.GetNodeAttribute()
# This is done in order to keep the scene output and non-scene output code DRY
mesh_list = [ mesh ]
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable automatic json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = ChunkedIndent(vertices, 15, True)
normal_values = ChunkedIndent(normal_values, 15, True)
color_values = ChunkedIndent(color_values, 15)
faces = ChunkedIndent(faces, 30)
metadata = {
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
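# Note on the '0metadata' key (assumption based on the pretty-print path):
# with sort_keys=True the '0' prefix floats the metadata block to the top of
# the JSON object; the prefix is presumably stripped again by
# executeRegexHacks() before the file is written, like the 'z' in 'zchildren'.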
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
mesh_list = generate_mesh_list(scene)
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = NoIndent(vertices)
normal_values = NoIndent(normal_values)
color_values = NoIndent(color_values)
faces = NoIndent(faces)
metadata = {
'formatVersion' : 3,
'type' : 'geometry',
'generatedBy' : 'convert-to-threejs.py',
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
    if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
mesh_list.append(node.GetNodeAttribute())
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
def generate_mesh_list(scene):
mesh_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
    if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
embed_object = generate_scene_output(node)
embed_name = getPrefixedName(node, 'Embed')
embed_dict[embed_name] = embed_object
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
def generate_embed_dict(scene):
embed_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
output = {
'type' : 'embedded',
'id' : getPrefixedName( node, 'Embed' )
}
return output
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
    if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
geometry_object = generate_geometry_object(node)
geometry_name = getPrefixedName( node, 'Geometry' )
geometry_dict[geometry_name] = geometry_object
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
def generate_geometry_dict(scene):
geometry_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
direction = (1,1,1)
color = (1,1,1)
intensity = 80.0
output = {
'type': 'DirectionalLight',
'color': getHex(color),
'intensity': intensity/100.00,
'direction': serializeVector3( direction ),
'target': getObjectName( None )
}
return output
def generate_light_object(node):
light = node.GetNodeAttribute()
light_types = ["point", "directional", "spot", "area", "volume"]
light_type = light_types[light.LightType.Get()]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
output = None
if light_type == "directional":
# Three.js directional lights emit light from a point in 3d space to a target node or the origin.
# When there is no target, we need to take a point, one unit away from the origin, and move it
# into the right location so that the origin acts like the target
if node.GetTarget():
direction = position
else:
translation = FbxVector4(0,0,0,0)
scale = FbxVector4(1,1,1,1)
rotation = transform.GetR()
matrix = FbxMatrix(translation, rotation, scale)
direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
output = {
'type': 'DirectionalLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'direction': serializeVector3( direction ),
'target': getObjectName( node.GetTarget() )
}
elif light_type == "point":
output = {
'type': 'PointLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get()
}
elif light_type == "spot":
output = {
'type': 'SpotLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get(),
'angle': light.OuterAngle.Get()*math.pi/180,
'exponent': light.DecayType.Get(),
'target': getObjectName( node.GetTarget() )
}
return output
def generate_ambient_light(scene):
scene_settings = scene.GetGlobalSettings()
ambient_color = scene_settings.GetAmbientColor()
ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
return None
output = {
'type': 'AmbientLight',
'color': getHex(ambient_color)
}
return output
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
position = (100, 100, 100)
near = 0.1
far = 1000
fov = 75
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
def generate_camera_object(node):
camera = node.GetNodeAttribute()
position = camera.Position.Get()
projection_types = [ "perspective", "orthogonal" ]
projection = projection_types[camera.ProjectionType.Get()]
near = camera.NearPlane.Get()
far = camera.FarPlane.Get()
name = getObjectName( node )
output = {}
if projection == "perspective":
aspect = camera.PixelAspectRatio.Get()
fov = camera.FieldOfView.Get()
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'aspect': aspect,
'near': near,
'far': far,
'position': serializeVector3( position )
}
elif projection == "orthogonal":
left = ""
right = ""
top = ""
bottom = ""
output = {
            'type': 'OrthographicCamera',
'left': left,
'right': right,
'top': top,
'bottom': bottom,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
    if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eCamera:
camera_string = getObjectName(node)
camera_list.append(camera_string)
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
def generate_camera_name_list(scene):
camera_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
mesh = node.GetNodeAttribute()
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
material_count = node.GetMaterialCount()
material_name = ""
if material_count > 0:
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append( getMaterialName(material) )
        if material_count <= 1 and not material_names:
material_names.append('')
#If this mesh has more than one material, use a proxy material
material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
output = {
'geometry': getPrefixedName( node, 'Geometry' ),
'material': material_name,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True,
}
return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
"CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
"TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
node_type = ""
    if node.GetNodeAttribute() is None:
node_type = "Null"
else:
node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
name = getObjectName( node )
output = {
'fbx_type': node_type,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True
}
return output
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
object_count = 0
    if node.GetNodeAttribute() is None:
object_data = generate_object(node)
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
object_data = generate_mesh_object(node)
elif attribute_type == FbxNodeAttribute.eLight:
object_data = generate_light_object(node)
elif attribute_type == FbxNodeAttribute.eCamera:
object_data = generate_camera_object(node)
else:
object_data = generate_object(node)
object_count += 1
object_name = getObjectName(node)
object_children = {}
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_children)
if node.GetChildCount() > 0:
# Having 'children' above other attributes is hard to read.
# We can send it to the bottom using the last letter of the alphabet 'z'.
# This letter is removed from the final output.
if option_pretty_print:
object_data['zchildren'] = object_children
else:
object_data['children'] = object_children
object_dict[object_name] = object_data
return object_count
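# Illustrative shape of the dictionary built above (hypothetical node names):
#
# {'Root': {'fbx_type': 'Null',
#           'position': [...], 'quaternion': [...], 'scale': [...],
#           'visible': True,
#           'children': {'Cube': {'geometry': 'Geometry_Cube', ...}}}}
#
# When pretty printing, 'children' is written as 'zchildren' so it sorts last.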
def generate_scene_objects(scene):
object_count = 0
object_dict = {}
ambient_light = generate_ambient_light(scene)
if ambient_light:
object_dict['AmbientLight'] = ambient_light
object_count += 1
if option_default_light:
default_light = generate_default_light()
object_dict['DefaultLight'] = default_light
object_count += 1
if option_default_camera:
default_camera = generate_default_camera()
object_dict['DefaultCamera'] = default_camera
object_count += 1
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
global_settings = scene.GetGlobalSettings()
objects, nobjects = generate_scene_objects(scene)
textures = generate_texture_dict(scene)
materials = generate_material_dict(scene)
geometries = generate_geometry_dict(scene)
embeds = generate_embed_dict(scene)
ntextures = len(textures)
nmaterials = len(materials)
ngeometries = len(geometries)
position = serializeVector3( (0,0,0) )
rotation = serializeVector3( (0,0,0) )
scale = serializeVector3( (1,1,1) )
camera_names = generate_camera_name_list(scene)
scene_settings = scene.GetGlobalSettings()
    # This does not seem to be of any help here
# global_settings.GetDefaultCamera()
defcamera = camera_names[0] if len(camera_names) > 0 else ""
if option_default_camera:
defcamera = 'default_camera'
metadata = {
'formatVersion': 3.2,
'type': 'scene',
'generatedBy': 'convert-to-threejs.py',
'objects': nobjects,
'geometries': ngeometries,
'materials': nmaterials,
'textures': ntextures
}
transform = {
'position' : position,
'rotation' : rotation,
'scale' : scale
}
defaults = {
'bgcolor' : 0,
'camera' : defcamera,
'fog' : ''
}
output = {
'objects': objects,
'geometries': geometries,
'materials': materials,
'textures': textures,
'embeds': embeds,
'transform': transform,
'defaults': defaults,
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
output = generate_non_scene_output(scene)
return output
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
    index = filepath.rfind('/')
    if index >= 0:
        directory = filepath[0:index]
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
    out = open(filepath, "w")
    out.write(content.encode('utf8', 'replace'))
    out.close()
def read_file(filepath):
f = open(filepath)
content = f.readlines()
f.close()
return content
def copy_textures(textures):
texture_dict = {}
for key in textures:
url = textures[key]['url']
src = replace_OutFolder2inFolder(url)
if url in texture_dict: # texture has been copied
continue
if not os.path.exists(src):
print("copy_texture error: we can't find this texture at " + src)
continue
try:
index = url.rfind('/')
folder = url[0:index]
if len(folder) and not os.path.exists(folder):
os.makedirs(folder)
shutil.copyfile(src, url)
texture_dict[url] = True
except IOError as e:
print "I/O error({0}): {1} {2}".format(e.errno, e.strerror, src)
def findFilesWithExt(directory, ext, include_path = True):
ext = ext.lower()
found = []
for root, dirs, files in os.walk(directory):
for filename in files:
current_ext = os.path.splitext(filename)[1].lower()
if current_ext == ext:
if include_path:
found.append(os.path.join(root, filename))
else:
found.append(filename)
return found
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
from optparse import OptionParser
try:
from FbxCommon import *
except ImportError:
import platform
msg = 'Could not locate the python FBX SDK!\n'
msg += 'You need to copy the FBX SDK into your python install folder such as '
if platform.system() == 'Windows' or platform.system() == 'Microsoft':
msg += '"Python26/Lib/site-packages"'
elif platform.system() == 'Linux':
msg += '"/usr/local/lib/python2.6/site-packages"'
elif platform.system() == 'Darwin':
msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
msg += ' folder.'
print(msg)
sys.exit(1)
usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
parser = OptionParser(usage=usage)
parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="pretty-print the resulting JSON output", default=False)
(options, args) = parser.parse_args()
option_triangulate = options.triangulate
    option_textures = not options.notextures
option_prefix = options.prefix
option_geometry = options.geometry
option_default_camera = options.defcamera
option_default_light = options.deflight
option_pretty_print = options.pretty
# Prepare the FBX SDK.
sdk_manager, scene = InitializeSdkObjects()
converter = FbxGeometryConverter(sdk_manager)
# The converter takes an FBX file as an argument.
if len(args) > 1:
print("\nLoading file: %s" % args[0])
result = LoadScene(sdk_manager, scene, args[0])
else:
result = False
print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
if not result:
print("\nAn error occurred while loading the file...")
else:
if option_triangulate:
print("\nForcing geometry to triangles")
triangulate_scene(scene)
        # Convert the scene to match the asset's up-axis convention
        upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector()
axis_system = FbxAxisSystem.MayaYUp
if upVector[0] == 3:
axis_system = FbxAxisSystem.MayaZUp
axis_system.ConvertScene(scene)
        inputFolder = args[0].replace("\\", "/")
        index = args[0].rfind("/")
        inputFolder = inputFolder[:index]
        outputFolder = args[1].replace("\\", "/")
        index = args[1].rfind("/")
        outputFolder = outputFolder[:index]
if option_geometry:
output_content = extract_geometry(scene, os.path.basename(args[0]))
else:
output_content = extract_scene(scene, os.path.basename(args[0]))
if option_pretty_print:
output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
output_string = executeRegexHacks(output_string)
else:
output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
output_path = os.path.join(os.getcwd(), args[1])
write_file(output_path, output_string)
        if 'textures' in output_content:
            copy_textures(output_content['textures'])
print("\nExported Three.js file to:\n%s\n" % output_path)
# Destroy all objects created by the FBX SDK.
sdk_manager.Destroy()
sys.exit(0)
| mit | 1,759,656,565,885,856,800 | 34.485648 | 214 | 0.579551 | false |
fidodaj/project2 | server/lib/werkzeug/debug/tbtools.py | 311 | 16785 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
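# Minimal usage sketch (added for illustration; assumes it runs inside an
# except block, as the debug middleware does internally):
#
# try:
#     1 / 0
# except Exception:
#     tb = get_current_traceback(ignore_system_exceptions=True)
#     tb.log(sys.stderr)                  # plain-text traceback
#     html = tb.render_full(evalex=False) # full HTML debugger page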
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
        # if the last frame is missing something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = open(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
                match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
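# Sketch (not part of Werkzeug): the interactive debugger evaluates console
# input inside a chosen frame's namespace roughly like this; `tb` is assumed
# to be a Traceback from get_current_traceback(), `some_local` a hypothetical
# variable in that frame.
#
# frame = tb.frames[-1]
# frame.eval('print(some_local)')   # executed with frame.globals/frame.locals
# console = frame.console           # per-frame interactive Console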
| apache-2.0 | -2,235,704,901,760,546,800 | 32.041339 | 103 | 0.552815 | false |
wangscript/libjingle-1 | trunk/tools/python/google/path_utils.py | 191 | 2910 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Some utility methods for getting and manipulating paths."""
# TODO(pamg): Have the buildbot use these, too.
import errno
import os
import sys
class PathNotFound(Exception): pass
def ScriptDir():
"""Get the full path to the directory containing the current script."""
script_filename = os.path.abspath(sys.argv[0])
return os.path.dirname(script_filename)
def FindAncestor(start_dir, ancestor):
"""Finds an ancestor dir in a path.
For example, FindAncestor('c:\foo\bar\baz', 'bar') would return
'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
"""
start_dir = os.path.abspath(start_dir)
path = start_dir
while True:
(parent, tail) = os.path.split(path)
if tail == ancestor:
return path
if not tail:
break
path = parent
raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
def FindUpwardParent(start_dir, *desired_list):
"""Finds the desired object's parent, searching upward from the start_dir.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the
first directory in which the top desired path component was found, or raises
PathNotFound if it wasn't.
"""
desired_path = os.path.join(*desired_list)
last_dir = ''
cur_dir = start_dir
found_path = os.path.join(cur_dir, desired_path)
while not os.path.exists(found_path):
last_dir = cur_dir
cur_dir = os.path.dirname(cur_dir)
if last_dir == cur_dir:
raise PathNotFound('Unable to find %s above %s' %
(desired_path, start_dir))
found_path = os.path.join(cur_dir, desired_path)
# Strip the entire original desired path from the end of the one found
# and remove a trailing path separator, if present.
found_path = found_path[:len(found_path) - len(desired_path)]
if found_path.endswith(os.sep):
found_path = found_path[:len(found_path) - 1]
return found_path
def FindUpward(start_dir, *desired_list):
"""Returns a path to the desired directory or file, searching upward.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the full
path to the desired object, or raises PathNotFound if it wasn't found.
"""
parent = FindUpwardParent(start_dir, *desired_list)
return os.path.join(parent, *desired_list)
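# Example (hypothetical layout): with a file /src/project/PRESUBMIT.py,
# FindUpward('/src/project/tools', 'PRESUBMIT.py') returns
# '/src/project/PRESUBMIT.py', and FindUpwardParent with the same arguments
# returns '/src/project'.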
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError, e:
# errno.EEXIST is "File exists". If we see another error, re-raise.
if e.errno != errno.EEXIST:
raise
| bsd-3-clause | -5,272,682,117,765,833,000 | 33.642857 | 80 | 0.701375 | false |
uclouvain/osis | education_group/views/access_requirements/create.py | 1 | 3536 | #
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.conf import settings
from django.contrib.messages.views import SuccessMessageMixin
from django.utils.translation import gettext_lazy as _
from django.views.generic import CreateView
from base.business.education_groups.access_requirements import can_postpone_access_requirements
from base.models.admission_condition import AdmissionConditionLine
from base.models.enums.access_requirements_sections import ConditionSectionsTypes
from base.views.mixins import AjaxTemplateMixin
from education_group.forms.admission_condition import CreateLineEnglishForm, \
CreateLineFrenchForm
from education_group.views.access_requirements.common import AccessRequirementsMixin
from osis_role.contrib.views import PermissionRequiredMixin
class CreateAccessRequirementsLine(SuccessMessageMixin, AccessRequirementsMixin, PermissionRequiredMixin,
AjaxTemplateMixin, CreateView):
template_name = "education_group_app/access_requirements/line_edit.html"
raise_exception = True
force_reload = True
model = AdmissionConditionLine
def get_permission_object(self):
return self.get_admission_condition_object().education_group_year
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["can_postpone"] = can_postpone_access_requirements(
self.get_admission_condition_object().education_group_year
)
context["section"] = ConditionSectionsTypes.get_value(self.request.GET["section"])
return context
def get_initial(self):
initial = super().get_initial()
initial["section"] = self.request.GET["section"]
return initial
def form_valid(self, form):
form.instance.admission_condition = self.get_admission_condition_object()
return super().form_valid(form)
def get_form_class(self):
language = self.request.GET['language']
if language == settings.LANGUAGE_CODE_EN:
return CreateLineEnglishForm
return CreateLineFrenchForm
def get_success_url(self):
return ""
def get_success_message(self, cleaned_data):
if self.request.POST.get('to_postpone'):
return _("Condition has been created (with postpone)")
return _("Condition has been created (without postpone)")
| agpl-3.0 | -8,946,607,689,185,371,000 | 43.746835 | 105 | 0.714003 | false |
dpetzold/django | tests/middleware/tests.py | 13 | 34932 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import random
import re
from io import BytesIO
from unittest import skipIf
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import (
BrokenLinkEmailsMiddleware, CommonMiddleware,
)
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import quote
@override_settings(ROOT_URLCONF='middleware.urls')
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/slash/')
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/noslash')
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/unknown')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get('/slash?test=1')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.url, '/slash/?test=1')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST, PUT, or PATCH to a URL which
would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get('/slash')
request.method = 'POST'
response = HttpResponseNotFound()
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PUT'
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PATCH'
with six.assertRaisesRegex(self, RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version ok.
"""
request = self.rf.get(quote('/needsquoting#'))
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get('/path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get('/slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to a URL which would normally be
redirected to a slashed version.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
request.method = 'POST'
response = HttpResponseNotFound()
with six.assertRaisesRegex(self, RuntimeError, 'end in a slash'):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
self.assertEqual(CommonMiddleware().process_request(request), None)
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
URLs which require quoting should be redirected to their slash version ok.
"""
request = self.rf.get(quote('/customurlconf/needsquoting#'))
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r,
"CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'/customurlconf/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self.rf.get('/customurlconf/path/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(
r.url,
'http://www.testserver/customurlconf/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url,
'http://www.testserver/customurlconf/slash/')
# Other tests
@override_settings(DISALLOWED_USER_AGENTS=[re.compile(r'foo')])
def test_disallowed_user_agents(self):
request = self.rf.get('/slash')
request.META['HTTP_USER_AGENT'] = 'foo'
with self.assertRaisesMessage(PermissionDenied, 'Forbidden user agent'):
CommonMiddleware().process_request(request)
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self.rf.get('/slash')
request.META['QUERY_STRING'] = force_str('drink=café')
r = CommonMiddleware().process_request(request)
self.assertIsNone(r)
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
def test_response_redirect_class(self):
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponsePermanentRedirect)
def test_response_redirect_class_subclass(self):
class MyCommonMiddleware(CommonMiddleware):
response_redirect_class = HttpResponseRedirect
request = self.rf.get('/slash')
response = HttpResponseNotFound()
r = MyCommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, '/slash/')
self.assertIsInstance(r, HttpResponseRedirect)
@override_settings(
IGNORABLE_404_URLS=[re.compile(r'foo')],
MANAGERS=['[email protected]'],
)
class BrokenLinkEmailsMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get('/regular_url/that/does/not/exist')
self.resp = self.client.get(self.req.path)
def test_404_error_reporting(self):
self.req.META['HTTP_REFERER'] = '/another/url/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Broken', mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = 'foo_url/that/does/not/exist'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
@skipIf(six.PY3, "HTTP_REFERER is str type on Python 3")
def test_404_error_nonascii_referrer(self):
        # Such referer strings should not happen, but if one does, the
        # middleware should not crash.
self.req.META['HTTP_REFERER'] = b'http://testserver/c/\xd0\xbb\xd0\xb8/'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
@skipIf(six.PY3, "HTTP_USER_AGENT is str type on Python 3")
def test_404_error_nonascii_user_agent(self):
        # Such user agent strings should not happen, but if one does, the
        # middleware should not crash.
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = b'\xd0\xbb\xd0\xb8\xff\xff'
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('User agent: \u043b\u0438\ufffd\ufffd\n', mail.outbox[0].body)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (re.compile(r'Spider.*'),
re.compile(r'Robot.*'))
def is_ignorable_request(self, request, uri, domain, referer):
'''Check user-agent in addition to normal checks.'''
if super(SubclassedMiddleware, self).is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META['HTTP_USER_AGENT']
return any(pattern.search(user_agent) for pattern in
self.ignored_user_agent_patterns)
self.req.META['HTTP_REFERER'] = '/another/url/'
self.req.META['HTTP_USER_AGENT'] = 'Spider machine 3.4'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
self.req.META['HTTP_USER_AGENT'] = 'My user agent'
SubclassedMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
Some bots set the referer to the current URL to avoid being blocked by
        a referer check (#25302).
"""
self.req.META['HTTP_REFERER'] = self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META['HTTP_REFERER'] = 'http://testserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META['HTTP_REFERER'] = 'http://anotherserver%s' % self.req.path
BrokenLinkEmailsMiddleware().process_response(self.req, self.resp)
self.assertEqual(len(mail.outbox), 1)
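# Illustrative sketch (not part of Django's test suite): the condition under
# which BrokenLinkEmailsMiddleware emails MANAGERS -- a 404 response with a
# referer that is not ignorable. Assumes the locmem email backend that
# Django's test runner configures, so sent mail lands in ``mail.outbox``;
# the manager address below is a placeholder.
def _demo_broken_link_email():
    with override_settings(MANAGERS=['manager@example.com'], DEBUG=False):
        request = RequestFactory().get('/regular_url/that/does/not/exist')
        request.META['HTTP_REFERER'] = '/another/url/'
        BrokenLinkEmailsMiddleware().process_response(
            request, HttpResponseNotFound())
        return len(mail.outbox)  # 1 if a broken link report was queued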
@override_settings(ROOT_URLCONF='middleware.cond_get_urls')
class ConditionalGetMiddlewareTest(SimpleTestCase):
def setUp(self):
self.req = RequestFactory().get('/')
self.resp = self.client.get(self.req.path_info)
# Tests for the Date header
def test_date_header_added(self):
self.assertNotIn('Date', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Date', self.resp)
# Tests for the Content-Length header
def test_content_length_header_added(self):
content_length = len(self.resp.content)
self.assertNotIn('Content-Length', self.resp)
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertIn('Content-Length', self.resp)
self.assertEqual(int(self.resp['Content-Length']), content_length)
def test_content_length_header_not_added(self):
resp = StreamingHttpResponse('content')
self.assertNotIn('Content-Length', resp)
resp = ConditionalGetMiddleware().process_response(self.req, resp)
self.assertNotIn('Content-Length', resp)
def test_content_length_header_not_changed(self):
bad_content_length = len(self.resp.content) + 10
self.resp['Content-Length'] = bad_content_length
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(int(self.resp['Content-Length']), bad_content_length)
# Tests for the ETag header
def test_if_none_match_and_no_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
self.resp['ETag'] = 'eggs'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_none_match_and_redirect(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_none_match_and_client_error(self):
self.req.META['HTTP_IF_NONE_MATCH'] = self.resp['ETag'] = 'spam'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
@override_settings(USE_ETAGS=True)
def test_etag(self):
req = HttpRequest()
res = HttpResponse('content')
self.assertTrue(
CommonMiddleware().process_response(req, res).has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
res['ETag'] = 'tomatoes'
self.assertEqual(
CommonMiddleware().process_response(req, res).get('ETag'),
'tomatoes')
@override_settings(USE_ETAGS=True)
def test_no_etag_streaming_response(self):
req = HttpRequest()
res = StreamingHttpResponse(['content'])
self.assertFalse(
CommonMiddleware().process_response(req, res).has_header('ETag'))
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 200)
def test_if_modified_since_and_redirect(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp['Location'] = '/'
self.resp.status_code = 301
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 301)
def test_if_modified_since_and_client_error(self):
self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
self.resp.status_code = 400
self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.resp.status_code, 400)
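# Illustrative sketch (not part of Django's test suite): a conditional GET in
# miniature. When the If-None-Match request header matches the response ETag,
# the middleware downgrades the response to 304 Not Modified; the ETag value
# here is arbitrary.
def _demo_conditional_get():
    request = RequestFactory().get('/')
    request.META['HTTP_IF_NONE_MATCH'] = '"abc123"'
    response = HttpResponse('payload')
    response['ETag'] = '"abc123"'
    response = ConditionalGetMiddleware().process_response(request, response)
    assert response.status_code == 304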
class XFrameOptionsMiddlewareTest(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to SAMEORIGIN to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='sameorigin'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to DENY to
have the middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
with override_settings(X_FRAME_OPTIONS='deny'):
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
Tests that if the X_FRAME_OPTIONS setting is not set then it defaults
to SAMEORIGIN.
"""
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
Tests that if the X-Frame-Options header is already set then the
middleware does not attempt to override it.
"""
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
Tests that if the response has a xframe_options_exempt attribute set
to False then it still sets the header, but if it's set to True then
it does not.
"""
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r.get('X-Frame-Options', None), None)
def test_is_extendable(self):
"""
Tests that the XFrameOptionsMiddleware method that determines the
X-Frame-Options header value can be overridden based on something in
the request or response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, 'sameorigin', False):
return 'SAMEORIGIN'
if getattr(response, 'sameorigin', False):
return 'SAMEORIGIN'
return 'DENY'
with override_settings(X_FRAME_OPTIONS='DENY'):
response = HttpResponse()
response.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware().process_response(request,
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
with override_settings(X_FRAME_OPTIONS='SAMEORIGIN'):
r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
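# Illustrative sketch (not part of Django's test suite): opting a single
# response out of clickjacking protection via the ``xframe_options_exempt``
# attribute, which is what Django's ``xframe_options_exempt`` view decorator
# sets under the hood.
def _demo_xframe_exempt():
    response = HttpResponse()
    response.xframe_options_exempt = True
    processed = XFrameOptionsMiddleware().process_response(
        HttpRequest(), response)
    assert 'X-Frame-Options' not in processed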
class GZipMiddlewareTest(SimpleTestCase):
"""
Tests the GZip middleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b'a' * 500
uncompressible_string = b''.join(six.int2byte(random.randint(0, 255)) for _ in range(500))
sequence = [b'a' * 500, b'b' * 200, b'a' * 300]
sequence_unicode = ['a' * 500, 'é' * 200, 'a' * 300]
def setUp(self):
self.req = RequestFactory().get('/')
self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp = StreamingHttpResponse(self.sequence)
self.stream_resp['Content-Type'] = 'text/html; charset=UTF-8'
self.stream_resp_unicode = StreamingHttpResponse(self.sequence_unicode)
self.stream_resp_unicode['Content-Type'] = 'text/html; charset=UTF-8'
@staticmethod
def decompress(gzipped_string):
with gzip.GzipFile(mode='rb', fileobj=BytesIO(gzipped_string)) as f:
return f.read()
def test_compress_response(self):
"""
Tests that compression is performed on responses with compressible content.
"""
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertEqual(r.get('Content-Length'), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Tests that compression is performed on responses with streaming content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp)
self.assertEqual(self.decompress(b''.join(r)), b''.join(self.sequence))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_streaming_response_unicode(self):
"""
Tests that compression is performed on responses with streaming Unicode content.
"""
r = GZipMiddleware().process_response(self.req, self.stream_resp_unicode)
self.assertEqual(self.decompress(b''.join(r)), b''.join(x.encode('utf-8') for x in self.sequence_unicode))
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertFalse(r.has_header('Content-Length'))
def test_compress_file_response(self):
"""
Tests that compression is performed on FileResponse.
"""
        def open_file():
            return open(__file__, 'rb')
with open_file() as file1:
file_resp = FileResponse(file1)
file_resp['Content-Type'] = 'text/html; charset=UTF-8'
r = GZipMiddleware().process_response(self.req, file_resp)
with open_file() as file2:
self.assertEqual(self.decompress(b''.join(r)), file2.read())
self.assertEqual(r.get('Content-Encoding'), 'gzip')
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Tests that compression is performed on responses with a status other than 200.
See #10762.
"""
self.resp.status_code = 404
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'gzip')
def test_no_compress_short_response(self):
"""
Tests that compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.short_string)
self.assertEqual(r.get('Content-Encoding'), None)
def test_no_compress_compressed_response(self):
"""
Tests that compression isn't performed on responses that are already compressed.
"""
self.resp['Content-Encoding'] = 'deflate'
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get('Content-Encoding'), 'deflate')
def test_no_compress_uncompressible_response(self):
"""
Tests that compression isn't performed on responses with uncompressible content.
"""
self.resp.content = self.uncompressible_string
r = GZipMiddleware().process_response(self.req, self.resp)
self.assertEqual(r.content, self.uncompressible_string)
self.assertEqual(r.get('Content-Encoding'), None)
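# Illustrative sketch (not part of Django's test suite): the stdlib gzip
# round trip that mirrors what GZipMiddleware applies to response content.
def _demo_gzip_roundtrip():
    payload = b'a' * 500
    buf = BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buf) as f:
        f.write(payload)
    compressed = buf.getvalue()
    assert len(compressed) < len(payload)  # repetitive content compresses well
    with gzip.GzipFile(mode='rb', fileobj=BytesIO(compressed)) as f:
        assert f.read() == payload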
@override_settings(USE_ETAGS=True)
class ETagGZipMiddlewareTest(SimpleTestCase):
"""
Tests if the ETag middleware behaves correctly with GZip middleware.
"""
rf = RequestFactory()
compressible_string = b'a' * 500
def test_compress_response(self):
"""
Tests that ETag is changed after gzip compression is performed.
"""
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
gzip_etag = response.get('ETag')
request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
response = GZipMiddleware().process_response(request,
CommonMiddleware().process_response(request,
HttpResponse(self.compressible_string)))
nogzip_etag = response.get('ETag')
self.assertNotEqual(gzip_etag, nogzip_etag)
# leviroth/praw -- praw/models/reddit/subreddit.py
"""Provide the Subreddit class."""
# pylint: disable=too-many-lines
from copy import deepcopy
from json import dumps, loads
from os.path import basename, dirname, join
from urllib.parse import urljoin
from prawcore import Redirect
import websocket
from ...const import API_PATH, JPEG_HEADER
from ...exceptions import APIException, ClientException
from ...util.cache import cachedproperty
from ..util import permissions_string, stream_generator
from ..listing.generator import ListingGenerator
from ..listing.mixins import SubredditListingMixin
from .base import RedditBase
from .emoji import SubredditEmoji
from .mixins import FullnameMixin, MessageableMixin
from .modmail import ModmailConversation
from .widgets import SubredditWidgets
from .wikipage import WikiPage
class Subreddit(
MessageableMixin, SubredditListingMixin, FullnameMixin, RedditBase
):
"""A class for Subreddits.
To obtain an instance of this class for subreddit ``/r/redditdev`` execute:
.. code:: python
subreddit = reddit.subreddit('redditdev')
While ``/r/all`` is not a real subreddit, it can still be treated like
one. The following outputs the titles of the 25 hottest submissions in
``/r/all``:
.. code:: python
for submission in reddit.subreddit('all').hot(limit=25):
print(submission.title)
Multiple subreddits can be combined like so:
.. code:: python
for submission in reddit.subreddit('redditdev+learnpython').top('all'):
print(submission)
Subreddits can be filtered from combined listings as follows. Note that
these filters are ignored by certain methods, including
:attr:`~praw.models.Subreddit.comments`,
:meth:`~praw.models.Subreddit.gilded`, and
:meth:`.SubredditStream.comments`.
.. code:: python
for submission in reddit.subreddit('all-redditdev').new():
print(submission)
**Typical Attributes**
This table describes attributes that typically belong to objects of this
class. Since attributes are dynamically provided (see
:ref:`determine-available-attributes-of-an-object`), there is not a
guarantee that these attributes will always be present, nor is this list
comprehensive in any way.
========================== ===============================================
Attribute Description
========================== ===============================================
``can_assign_link_flair`` Whether users can assign their own link flair.
``can_assign_user_flair`` Whether users can assign their own user flair.
``created_utc`` Time the subreddit was created, represented in
`Unix Time`_.
``description`` Subreddit description, in Markdown.
``description_html`` Subreddit description, in HTML.
``display_name`` Name of the subreddit.
``id`` ID of the subreddit.
``name`` Fullname of the subreddit.
``over18`` Whether the subreddit is NSFW.
``public_description`` Description of the subreddit, shown in searches
and on the "You must be invited to visit this
community" page (if applicable).
``spoilers_enabled`` Whether the spoiler tag feature is enabled.
``subscribers`` Count of subscribers.
``user_is_banned`` Whether the authenticated user is banned.
``user_is_moderator`` Whether the authenticated user is a moderator.
``user_is_subscriber`` Whether the authenticated user is subscribed.
========================== ===============================================
.. _Unix Time: https://en.wikipedia.org/wiki/Unix_time
"""
# pylint: disable=too-many-public-methods
STR_FIELD = "display_name"
MESSAGE_PREFIX = "#"
@staticmethod
def _create_or_update(
_reddit,
allow_images=None,
allow_post_crossposts=None,
allow_top=None,
collapse_deleted_comments=None,
comment_score_hide_mins=None,
description=None,
domain=None,
exclude_banned_modqueue=None,
header_hover_text=None,
hide_ads=None,
lang=None,
key_color=None,
link_type=None,
name=None,
over_18=None,
public_description=None,
public_traffic=None,
show_media=None,
show_media_preview=None,
spam_comments=None,
spam_links=None,
spam_selfposts=None,
spoilers_enabled=None,
sr=None,
submit_link_label=None,
submit_text=None,
submit_text_label=None,
subreddit_type=None,
suggested_comment_sort=None,
title=None,
wiki_edit_age=None,
wiki_edit_karma=None,
wikimode=None,
**other_settings
):
# pylint: disable=invalid-name,too-many-locals,too-many-arguments
model = {
"allow_images": allow_images,
"allow_post_crossposts": allow_post_crossposts,
"allow_top": allow_top,
"collapse_deleted_comments": collapse_deleted_comments,
"comment_score_hide_mins": comment_score_hide_mins,
"description": description,
"domain": domain,
"exclude_banned_modqueue": exclude_banned_modqueue,
"header-title": header_hover_text, # Remap here - better name
"hide_ads": hide_ads,
"key_color": key_color,
"lang": lang,
"link_type": link_type,
"name": name,
"over_18": over_18,
"public_description": public_description,
"public_traffic": public_traffic,
"show_media": show_media,
"show_media_preview": show_media_preview,
"spam_comments": spam_comments,
"spam_links": spam_links,
"spam_selfposts": spam_selfposts,
"spoilers_enabled": spoilers_enabled,
"sr": sr,
"submit_link_label": submit_link_label,
"submit_text": submit_text,
"submit_text_label": submit_text_label,
"suggested_comment_sort": suggested_comment_sort,
"title": title,
"type": subreddit_type,
"wiki_edit_age": wiki_edit_age,
"wiki_edit_karma": wiki_edit_karma,
"wikimode": wikimode,
}
model.update(other_settings)
_reddit.post(API_PATH["site_admin"], data=model)
@staticmethod
def _subreddit_list(subreddit, other_subreddits):
if other_subreddits:
return ",".join(
[str(subreddit)] + [str(x) for x in other_subreddits]
)
return str(subreddit)
@property
def _kind(self):
"""Return the class's kind."""
return self._reddit.config.kinds["subreddit"]
@cachedproperty
def banned(self):
"""Provide an instance of :class:`.SubredditRelationship`.
For example to ban a user try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').banned.add('NAME', ban_reason='...')
To list the banned users along with any notes, try:
.. code-block:: python
for ban in reddit.subreddit('SUBREDDIT').banned():
print('{}: {}'.format(ban, ban.note))
"""
return SubredditRelationship(self, "banned")
@cachedproperty
def collections(self):
r"""Provide an instance of :class:`.SubredditCollections`.
To see the permalinks of all :class:`.Collection`\ s that belong to
a subreddit, try:
.. code-block:: python
for collection in reddit.subreddit('SUBREDDIT').collections:
print(collection.permalink)
To get a specific :class:`.Collection` by its UUID or permalink,
use one of the following:
.. code-block:: python
collection = reddit.subreddit('SUBREDDIT').collections('some_uuid')
collection = reddit.subreddit('SUBREDDIT').collections(
permalink='https://reddit.com/r/SUBREDDIT/collection/some_uuid')
"""
return self._subreddit_collections_class(self._reddit, self)
@cachedproperty
def contributor(self):
"""Provide an instance of :class:`.ContributorRelationship`.
Contributors are also known as approved submitters.
To add a contributor try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').contributor.add('NAME')
"""
return ContributorRelationship(self, "contributor")
@cachedproperty
def emoji(self):
"""Provide an instance of :class:`.SubredditEmoji`.
This attribute can be used to discover all emoji for a subreddit:
.. code:: python
for emoji in reddit.subreddit('iama').emoji:
print(emoji)
A single emoji can be lazily retrieved via:
.. code:: python
reddit.subreddit('blah').emoji['emoji_name']
        .. note:: Attempting to access attributes of a nonexistent emoji will
            result in a :class:`.ClientException`.
"""
return SubredditEmoji(self)
@cachedproperty
def filters(self):
"""Provide an instance of :class:`.SubredditFilters`."""
return SubredditFilters(self)
@cachedproperty
def flair(self):
"""Provide an instance of :class:`.SubredditFlair`.
Use this attribute for interacting with a subreddit's flair. For
example to list all the flair for a subreddit which you have the
``flair`` moderator permission on try:
.. code-block:: python
for flair in reddit.subreddit('NAME').flair():
print(flair)
Flair templates can be interacted with through this attribute via:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
return SubredditFlair(self)
@cachedproperty
def mod(self):
"""Provide an instance of :class:`.SubredditModeration`."""
return SubredditModeration(self)
@cachedproperty
def moderator(self):
"""Provide an instance of :class:`.ModeratorRelationship`.
For example to add a moderator try:
.. code-block:: python
reddit.subreddit('SUBREDDIT').moderator.add('NAME')
To list the moderators along with their permissions try:
.. code-block:: python
for moderator in reddit.subreddit('SUBREDDIT').moderator():
print('{}: {}'.format(moderator, moderator.mod_permissions))
"""
return ModeratorRelationship(self, "moderator")
@cachedproperty
def modmail(self):
"""Provide an instance of :class:`.Modmail`."""
return Modmail(self)
@cachedproperty
def muted(self):
"""Provide an instance of :class:`.SubredditRelationship`."""
return SubredditRelationship(self, "muted")
@cachedproperty
def quaran(self):
"""Provide an instance of :class:`.SubredditQuarantine`.
This property is named ``quaran`` because ``quarantine`` is a
Subreddit attribute returned by Reddit to indicate whether or not a
Subreddit is quarantined.
"""
return SubredditQuarantine(self)
@cachedproperty
def stream(self):
"""Provide an instance of :class:`.SubredditStream`.
Streams can be used to indefinitely retrieve new comments made to a
subreddit, like:
.. code:: python
for comment in reddit.subreddit('iama').stream.comments():
print(comment)
Additionally, new submissions can be retrieved via the stream. In the
following example all submissions are fetched via the special subreddit
``all``:
.. code:: python
for submission in reddit.subreddit('all').stream.submissions():
print(submission)
"""
return SubredditStream(self)
@cachedproperty
def stylesheet(self):
"""Provide an instance of :class:`.SubredditStylesheet`."""
return SubredditStylesheet(self)
@cachedproperty
def widgets(self):
"""Provide an instance of :class:`.SubredditWidgets`.
**Example usage**
Get all sidebar widgets:
.. code-block:: python
for widget in reddit.subreddit('redditdev').widgets.sidebar:
print(widget)
Get ID card widget:
.. code-block:: python
print(reddit.subreddit('redditdev').widgets.id_card)
"""
return SubredditWidgets(self)
@cachedproperty
def wiki(self):
"""Provide an instance of :class:`.SubredditWiki`.
This attribute can be used to discover all wikipages for a subreddit:
.. code:: python
for wikipage in reddit.subreddit('iama').wiki:
print(wikipage)
To fetch the content for a given wikipage try:
.. code:: python
wikipage = reddit.subreddit('iama').wiki['proof']
print(wikipage.content_md)
"""
return SubredditWiki(self)
def __init__(self, reddit, display_name=None, _data=None):
"""Initialize a Subreddit instance.
:param reddit: An instance of :class:`~.Reddit`.
:param display_name: The name of the subreddit.
.. note:: This class should not be initialized directly. Instead obtain
an instance via: ``reddit.subreddit('subreddit_name')``
"""
if bool(display_name) == bool(_data):
raise TypeError(
"Either `display_name` or `_data` must be provided."
)
super(Subreddit, self).__init__(reddit, _data=_data)
if display_name:
self.display_name = display_name
self._path = API_PATH["subreddit"].format(subreddit=self)
def _fetch_info(self):
return ("subreddit_about", {"subreddit": self}, None)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
data = data["data"]
other = type(self)(self._reddit, _data=data)
self.__dict__.update(other.__dict__)
self._fetched = True
def _submit_media(self, data, timeout):
"""Submit and return an `image`, `video`, or `videogif`.
This is a helper method for submitting posts that are not link posts or
self posts.
"""
response = self._reddit.post(API_PATH["submit"], data=data)
# About the websockets:
#
# Reddit responds to this request with only two fields: a link to
# the user's /submitted page, and a websocket URL. We can use the
# websocket URL to get a link to the new post once it is created.
#
# An important note to PRAW contributors or anyone who would
# wish to step through this section with a debugger: This block
# of code is NOT debugger-friendly. If there is *any*
# significant time between the POST request just above this
# comment and the creation of the websocket connection just
# below, the code will become stuck in an infinite loop at the
# socket.recv() call. I believe this is because only one message is
# sent over the websocket, and if the client doesn't connect
# soon enough, it will miss the message and get stuck forever
# waiting for another.
#
# So if you need to debug this section of code, please let the
# websocket creation happen right after the POST request,
# otherwise you will have trouble.
if not isinstance(response, dict):
raise ClientException(
"Something went wrong with your post: {!r}".format(response)
)
try:
socket = websocket.create_connection(
response["json"]["data"]["websocket_url"], timeout=timeout
)
ws_update = loads(socket.recv())
socket.close()
except websocket.WebSocketTimeoutException:
raise ClientException(
"Websocket error. Check your media file. "
"Your post may still have been created."
)
url = ws_update["payload"]["redirect"]
return self._reddit.submission(url=url)
def _upload_media(self, media_path):
"""Upload media and return its URL. Uses undocumented endpoint."""
if media_path is None:
media_path = join(
dirname(dirname(dirname(__file__))), "images", "PRAW logo.png"
)
file_name = basename(media_path).lower()
mime_type = {
"png": "image/png",
"mov": "video/quicktime",
"mp4": "video/mp4",
"jpg": "image/jpeg",
"jpeg": "image/jpeg",
"gif": "image/gif",
}.get(
file_name.rpartition(".")[2], "image/jpeg"
) # default to JPEG
img_data = {"filepath": file_name, "mimetype": mime_type}
url = API_PATH["media_asset"]
# until we learn otherwise, assume this request always succeeds
upload_lease = self._reddit.post(url, data=img_data)["args"]
upload_url = "https:{}".format(upload_lease["action"])
upload_data = {
item["name"]: item["value"] for item in upload_lease["fields"]
}
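        # The lease mirrors an S3-style POST policy: every returned form field
        # must accompany the file upload, and the final asset URL is the
        # lease's action URL joined with its "key" field (see the return
        # statement below).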
with open(media_path, "rb") as media:
response = self._reddit._core._requestor._http.post(
upload_url, data=upload_data, files={"file": media}
)
response.raise_for_status()
return upload_url + "/" + upload_data["key"]
def random(self):
"""Return a random Submission.
Returns ``None`` on subreddits that do not support the random feature.
One example, at the time of writing, is /r/wallpapers.
"""
url = API_PATH["subreddit_random"].format(subreddit=self)
try:
self._reddit.get(url, params={"unique": self._reddit._next_unique})
except Redirect as redirect:
path = redirect.path
try:
return self._submission_class(
self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
)
except ClientException:
return None
def rules(self):
"""Return rules for the subreddit.
For example to show the rules of ``/r/redditdev`` try:
.. code:: python
reddit.subreddit('redditdev').rules()
"""
return self._reddit.get(API_PATH["rules"].format(subreddit=self))
def search(
self,
query,
sort="relevance",
syntax="lucene",
time_filter="all",
**generator_kwargs
):
"""Return a ListingGenerator for items that match ``query``.
:param query: The query string to search for.
:param sort: Can be one of: relevance, hot, top, new,
comments. (default: relevance).
:param syntax: Can be one of: cloudsearch, lucene, plain
(default: lucene).
:param time_filter: Can be one of: all, day, hour, month, week, year
(default: all).
For more information on building a search query see:
https://www.reddit.com/wiki/search
For example to search all subreddits for ``praw`` try:
.. code:: python
for submission in reddit.subreddit('all').search('praw'):
print(submission.title)
"""
self._validate_time_filter(time_filter)
not_all = self.display_name.lower() != "all"
self._safely_add_arguments(
generator_kwargs,
"params",
q=query,
restrict_sr=not_all,
sort=sort,
syntax=syntax,
t=time_filter,
)
url = API_PATH["search"].format(subreddit=self)
return ListingGenerator(self._reddit, url, **generator_kwargs)
def sticky(self, number=1):
"""Return a Submission object for a sticky of the subreddit.
:param number: Specify which sticky to return. 1 appears at the top
(default: 1).
Raises ``prawcore.NotFound`` if the sticky does not exist.
"""
url = API_PATH["about_sticky"].format(subreddit=self)
try:
self._reddit.get(url, params={"num": number})
except Redirect as redirect:
path = redirect.path
return self._submission_class(
self._reddit, url=urljoin(self._reddit.config.reddit_url, path)
)
def submit(
self,
title,
selftext=None,
url=None,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
collection_id=None,
):
"""Add a submission to the subreddit.
:param title: The title of the submission.
:param selftext: The markdown formatted content for a ``text``
submission. Use an empty string, ``''``, to make a title-only
submission.
:param url: The URL for a ``link`` submission.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: None).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: None).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: True).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission (default: True).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:returns: A :class:`~.Submission` object for the newly created
submission.
Either ``selftext`` or ``url`` can be provided, but not both.
For example to submit a URL to ``/r/reddit_api_test`` do:
.. code:: python
title = 'PRAW documentation'
url = 'https://praw.readthedocs.io'
reddit.subreddit('reddit_api_test').submit(title, url=url)
.. note ::
For submitting images, videos, and videogifs,
see :meth:`.submit_image` and :meth:`.submit_video`.
"""
if (bool(selftext) or selftext == "") == bool(url):
raise TypeError("Either `selftext` or `url` must be provided.")
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
if selftext is not None:
data.update(kind="self", text=selftext)
else:
data.update(kind="link", url=url)
return self._reddit.post(API_PATH["submit"], data=data)
def submit_image(
self,
title,
image_path,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
timeout=10,
collection_id=None,
):
"""Add an image submission to the subreddit.
:param title: The title of the submission.
:param image_path: The path to an image, to upload and post.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: None).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: None).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: True).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission (default: True).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:param timeout: Specifies a particular timeout, in seconds. Use to
avoid "Websocket error" exceptions (default: 10).
.. note::
Reddit's API uses WebSockets to respond with the link of the
newly created post. If this fails, the method will raise
:class:`.ClientException`. Occasionally, the Reddit post will still
be created. More often, there is an error with the image file. If
you frequently get exceptions but successfully created posts, try
setting the ``timeout`` parameter to a value above 10.
:returns: A :class:`~.Submission` object for the newly created
submission.
For example to submit an image to ``/r/reddit_api_test`` do:
.. code:: python
title = 'My favorite picture'
image = '/path/to/image.png'
reddit.subreddit('reddit_api_test').submit_image(title, image)
"""
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
data.update(kind="image", url=self._upload_media(image_path))
return self._submit_media(data, timeout)
def submit_video(
self,
title,
video_path,
videogif=False,
thumbnail_path=None,
flair_id=None,
flair_text=None,
resubmit=True,
send_replies=True,
nsfw=False,
spoiler=False,
timeout=10,
collection_id=None,
):
"""Add a video or videogif submission to the subreddit.
:param title: The title of the submission.
:param video_path: The path to a video, to upload and post.
:param videogif: A ``bool`` value. If ``True``, the video is
uploaded as a videogif, which is essentially a silent video
(default: ``False``).
:param thumbnail_path: (Optional) The path to an image, to be uploaded
and used as the thumbnail for this video. If not provided, the
PRAW logo will be used as the thumbnail.
:param collection_id: The UUID of a :class:`.Collection` to add the
newly-submitted post to.
:param flair_id: The flair template to select (default: ``None``).
:param flair_text: If the template's ``flair_text_editable`` value is
True, this value will set a custom text (default: ``None``).
:param resubmit: When False, an error will occur if the URL has already
been submitted (default: ``True``).
:param send_replies: When True, messages will be sent to the submission
author when comments are made to the submission
(default: ``True``).
:param nsfw: Whether or not the submission should be marked NSFW
(default: False).
:param spoiler: Whether or not the submission should be marked as
a spoiler (default: False).
:param timeout: Specifies a particular timeout, in seconds. Use to
avoid "Websocket error" exceptions (default: 10).
.. note::
Reddit's API uses WebSockets to respond with the link of the
newly created post. If this fails, the method will raise
:class:`.ClientException`. Occasionally, the Reddit post will still
be created. More often, there is an error with the video file. If
you frequently get exceptions but successfully created posts, try
setting the ``timeout`` parameter to a value above 10.
:returns: A :class:`~.Submission` object for the newly created
submission.
For example to submit a video to ``/r/reddit_api_test`` do:
.. code:: python
title = 'My favorite movie'
video = '/path/to/video.mp4'
reddit.subreddit('reddit_api_test').submit_video(title, video)
"""
data = {
"sr": str(self),
"resubmit": bool(resubmit),
"sendreplies": bool(send_replies),
"title": title,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (
("flair_id", flair_id),
("flair_text", flair_text),
("collection_id", collection_id),
):
if value is not None:
data[key] = value
data.update(
kind="videogif" if videogif else "video",
url=self._upload_media(video_path),
# if thumbnail_path is None, it uploads the PRAW logo
video_poster_url=self._upload_media(thumbnail_path),
)
return self._submit_media(data, timeout)
def subscribe(self, other_subreddits=None):
"""Subscribe to the subreddit.
:param other_subreddits: When provided, also subscribe to the provided
list of subreddits.
"""
data = {
"action": "sub",
"skip_inital_defaults": True,
"sr_name": self._subreddit_list(self, other_subreddits),
}
self._reddit.post(API_PATH["subscribe"], data=data)
def traffic(self):
"""Return a dictionary of the subreddit's traffic statistics.
Raises ``prawcore.NotFound`` when the traffic stats aren't available to
the authenticated user, that is, they are not public and the
authenticated user is not a moderator of the subreddit.
"""
return self._reddit.get(
API_PATH["about_traffic"].format(subreddit=self)
)
def unsubscribe(self, other_subreddits=None):
"""Unsubscribe from the subreddit.
        :param other_subreddits: When provided, also unsubscribe from the
            provided list of subreddits.
"""
data = {
"action": "unsub",
"sr_name": self._subreddit_list(self, other_subreddits),
}
self._reddit.post(API_PATH["subscribe"], data=data)
class SubredditFilters(object):
"""Provide functions to interact with the special Subreddit's filters.
Members of this class should be utilized via ``Subreddit.filters``. For
example to add a filter run:
.. code:: python
reddit.subreddit('all').filters.add('subreddit_name')
"""
def __init__(self, subreddit):
"""Create a SubredditFilters instance.
:param subreddit: The special subreddit whose filters to work with.
As of this writing filters can only be used with the special subreddits
``all`` and ``mod``.
"""
self.subreddit = subreddit
def __iter__(self):
"""Iterate through the special subreddit's filters.
This method should be invoked as:
.. code:: python
for subreddit in reddit.subreddit('NAME').filters:
...
"""
url = API_PATH["subreddit_filter_list"].format(
special=self.subreddit, user=self.subreddit._reddit.user.me()
)
params = {"unique": self.subreddit._reddit._next_unique}
response_data = self.subreddit._reddit.get(url, params=params)
for subreddit in response_data.subreddits:
yield subreddit
def add(self, subreddit):
"""Add ``subreddit`` to the list of filtered subreddits.
:param subreddit: The subreddit to add to the filter list.
Items from subreddits added to the filtered list will no longer be
included when obtaining listings for ``/r/all``.
Alternatively, you can filter a subreddit temporarily from a special
listing in a manner like so:
.. code:: python
reddit.subreddit('all-redditdev-learnpython')
Raises ``prawcore.NotFound`` when calling on a non-special subreddit.
"""
url = API_PATH["subreddit_filter"].format(
special=self.subreddit,
user=self.subreddit._reddit.user.me(),
subreddit=subreddit,
)
self.subreddit._reddit.request(
"PUT", url, data={"model": dumps({"name": str(subreddit)})}
)
def remove(self, subreddit):
"""Remove ``subreddit`` from the list of filtered subreddits.
:param subreddit: The subreddit to remove from the filter list.
Raises ``prawcore.NotFound`` when calling on a non-special subreddit.
"""
url = API_PATH["subreddit_filter"].format(
special=self.subreddit,
user=self.subreddit._reddit.user.me(),
subreddit=str(subreddit),
)
self.subreddit._reddit.request("DELETE", url, data={})
class SubredditFlair(object):
"""Provide a set of functions to interact with a Subreddit's flair."""
@cachedproperty
def link_templates(self):
"""Provide an instance of :class:`.SubredditLinkFlairTemplates`.
Use this attribute for interacting with a subreddit's link flair
templates. For example to list all the link flair templates for a
subreddit which you have the ``flair`` moderator permission on try:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.link_templates:
print(template)
"""
return SubredditLinkFlairTemplates(self.subreddit)
@cachedproperty
def templates(self):
"""Provide an instance of :class:`.SubredditRedditorFlairTemplates`.
Use this attribute for interacting with a subreddit's flair
templates. For example to list all the flair templates for a subreddit
which you have the ``flair`` moderator permission on try:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
return SubredditRedditorFlairTemplates(self.subreddit)
def __call__(self, redditor=None, **generator_kwargs):
"""Return a generator for Redditors and their associated flair.
:param redditor: When provided, yield at most a single
:class:`~.Redditor` instance (default: None).
This method is intended to be used like:
.. code-block:: python
for flair in reddit.subreddit('NAME').flair(limit=None):
print(flair)
"""
Subreddit._safely_add_arguments(
generator_kwargs, "params", name=redditor
)
generator_kwargs.setdefault("limit", None)
url = API_PATH["flairlist"].format(subreddit=self.subreddit)
return ListingGenerator(
self.subreddit._reddit, url, **generator_kwargs
)
def __init__(self, subreddit):
"""Create a SubredditFlair instance.
:param subreddit: The subreddit whose flair to work with.
"""
self.subreddit = subreddit
def configure(
self,
position="right",
self_assign=False,
link_position="left",
link_self_assign=False,
**settings
):
"""Update the subreddit's flair configuration.
:param position: One of left, right, or False to disable (default:
right).
:param self_assign: (boolean) Permit self assignment of user flair
(default: False).
:param link_position: One of left, right, or False to disable
(default: left).
:param link_self_assign: (boolean) Permit self assignment
of link flair (default: False).
Additional keyword arguments can be provided to handle new settings as
Reddit introduces them.
"""
data = {
"flair_enabled": bool(position),
"flair_position": position or "right",
"flair_self_assign_enabled": self_assign,
"link_flair_position": link_position or "",
"link_flair_self_assign_enabled": link_self_assign,
}
data.update(settings)
url = API_PATH["flairconfig"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def delete(self, redditor):
"""Delete flair for a Redditor.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
.. note:: To delete the flair of many Redditors at once, please see
:meth:`~praw.models.reddit.subreddit.SubredditFlair.update`.
"""
url = API_PATH["deleteflair"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data={"name": str(redditor)})
def delete_all(self):
"""Delete all Redditor flair in the Subreddit.
:returns: List of dictionaries indicating the success or failure of
each delete.
"""
return self.update(x["user"] for x in self())
def set(
self, redditor=None, text="", css_class="", flair_template_id=None
):
"""Set flair for a Redditor.
:param redditor: (Required) A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param text: The flair text to associate with the Redditor or
Submission (default: '').
:param css_class: The css class to associate with the flair html
(default: ''). Use either this or ``flair_template_id``.
:param flair_template_id: The ID of the flair template to be used
(default: ``None``). Use either this or ``css_class``.
This method can only be used by an authenticated user who is a
moderator of the associated Subreddit.
Example:
.. code:: python
reddit.subreddit('redditdev').flair.set('bboe', 'PRAW author',
css_class='mods')
template = '6bd28436-1aa7-11e9-9902-0e05ab0fad46'
reddit.subreddit('redditdev').flair.set('spez', 'Reddit CEO',
flair_template_id=template)
"""
if css_class and flair_template_id is not None:
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with `flair_template_id`."
)
data = {"name": str(redditor), "text": text}
if flair_template_id is not None:
data["flair_template_id"] = flair_template_id
url = API_PATH["select_flair"].format(subreddit=self.subreddit)
else:
data["css_class"] = css_class
url = API_PATH["flair"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def update(self, flair_list, text="", css_class=""):
"""Set or clear the flair for many Redditors at once.
:param flair_list: Each item in this list should be either: the name of
a Redditor, an instance of :class:`.Redditor`, or a dictionary
mapping keys ``user``, ``flair_text``, and ``flair_css_class`` to
their respective values. The ``user`` key should map to a Redditor,
            as described above. When a dictionary isn't provided, or the
            dictionary is missing the ``flair_text`` or ``flair_css_class``
            keys, the default values will come from the following
            arguments.
:param text: The flair text to use when not explicitly provided in
``flair_list`` (default: '').
:param css_class: The css class to use when not explicitly provided in
``flair_list`` (default: '').
:returns: List of dictionaries indicating the success or failure of
each update.
For example to clear the flair text, and set the ``praw`` flair css
class on a few users try:
.. code:: python
subreddit.flair.update(['bboe', 'spez', 'spladug'],
css_class='praw')
"""
lines = []
for item in flair_list:
if isinstance(item, dict):
fmt_data = (
str(item["user"]),
item.get("flair_text", text),
item.get("flair_css_class", css_class),
)
else:
fmt_data = (str(item), text, css_class)
lines.append('"{}","{}","{}"'.format(*fmt_data))
response = []
url = API_PATH["flaircsv"].format(subreddit=self.subreddit)
while lines:
data = {"flair_csv": "\n".join(lines[:100])}
response.extend(self.subreddit._reddit.post(url, data=data))
lines = lines[100:]
return response
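# Illustrative sketch (not part of PRAW itself): a bulk flair update mixing
# bare redditor names with an explicit per-user mapping, as described in the
# ``update`` docstring above. The names are placeholders.
def _example_bulk_flair(subreddit):
    flair_list = [
        "bboe",
        {"user": "spez", "flair_text": "Reddit CEO"},
    ]
    return subreddit.flair.update(flair_list, css_class="praw")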
class SubredditFlairTemplates(object):
"""Provide functions to interact with a Subreddit's flair templates."""
@staticmethod
def flair_type(is_link):
"""Return LINK_FLAIR or USER_FLAIR depending on ``is_link`` value."""
return "LINK_FLAIR" if is_link else "USER_FLAIR"
def __init__(self, subreddit):
"""Create a SubredditFlairTemplate instance.
:param subreddit: The subreddit whose flair templates to work with.
.. note:: This class should not be initialized directly. Instead obtain
an instance via:
``reddit.subreddit('subreddit_name').flair.templates`` or
``reddit.subreddit('subreddit_name').flair.link_templates``.
"""
self.subreddit = subreddit
def _add(
self,
text,
css_class="",
text_editable=False,
is_link=None,
background_color=None,
text_color=None,
mod_only=None,
):
if css_class and any(
param is not None
for param in (background_color, text_color, mod_only)
):
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with parameters `background_color`, "
"`text_color`, or `mod_only`."
)
if css_class:
url = API_PATH["flairtemplate"].format(subreddit=self.subreddit)
data = {
"css_class": css_class,
"flair_type": self.flair_type(is_link),
"text": text,
"text_editable": bool(text_editable),
}
else:
url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
data = {
"background_color": background_color,
"text_color": text_color,
"flair_type": self.flair_type(is_link),
"text": text,
"text_editable": bool(text_editable),
"mod_only": bool(mod_only),
}
self.subreddit._reddit.post(url, data=data)
def _clear(self, is_link=None):
url = API_PATH["flairtemplateclear"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(
url, data={"flair_type": self.flair_type(is_link)}
)
def delete(self, template_id):
"""Remove a flair template provided by ``template_id``.
For example, to delete the first Redditor flair template listed, try:
.. code-block:: python
template_info = list(subreddit.flair.templates)[0]
subreddit.flair.templates.delete(template_info['id'])
"""
url = API_PATH["flairtemplatedelete"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(
url, data={"flair_template_id": template_id}
)
def update(
self,
template_id,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Update the flair template provided by ``template_id``.
:param template_id: The flair template to update.
:param text: The flair template's new text (required).
:param css_class: The flair template's new css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example to make a user flair template text_editable, try:
.. code-block:: python
template_info = list(subreddit.flair.templates)[0]
subreddit.flair.templates.update(
template_info['id'],
template_info['flair_text'],
text_editable=True)
.. note::
Any parameters not provided will be set to default values (usually
``None`` or ``False``) on Reddit's end.
"""
if css_class and any(
param is not None
for param in (background_color, text_color, mod_only)
):
raise TypeError(
"Parameter `css_class` cannot be used in "
"conjunction with parameters `background_color`, "
"`text_color`, or `mod_only`."
)
if css_class:
url = API_PATH["flairtemplate"].format(subreddit=self.subreddit)
data = {
"css_class": css_class,
"flair_template_id": template_id,
"text": text,
"text_editable": bool(text_editable),
}
else:
url = API_PATH["flairtemplate_v2"].format(subreddit=self.subreddit)
data = {
"flair_template_id": template_id,
"text": text,
"background_color": background_color,
"text_color": text_color,
"text_editable": text_editable,
"mod_only": mod_only,
}
self.subreddit._reddit.post(url, data=data)
class SubredditRedditorFlairTemplates(SubredditFlairTemplates):
"""Provide functions to interact with Redditor flair templates."""
def __iter__(self):
"""Iterate through the user flair templates.
Example:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.templates:
print(template)
"""
url = API_PATH["user_flair"].format(subreddit=self.subreddit)
params = {"unique": self.subreddit._reddit._next_unique}
for template in self.subreddit._reddit.get(url, params=params):
yield template
def add(
self,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Add a Redditor flair template to the associated subreddit.
:param text: The flair template's text (required).
:param css_class: The flair template's css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example, to add an editable Redditor flair try:
.. code-block:: python
reddit.subreddit('NAME').flair.templates.add(
css_class='praw', text_editable=True)
"""
self._add(
text,
css_class=css_class,
text_editable=text_editable,
is_link=False,
background_color=background_color,
text_color=text_color,
mod_only=mod_only,
)
def clear(self):
"""Remove all Redditor flair templates from the subreddit.
For example:
.. code-block:: python
reddit.subreddit('NAME').flair.templates.clear()
"""
self._clear(is_link=False)
class SubredditLinkFlairTemplates(SubredditFlairTemplates):
"""Provide functions to interact with link flair templates."""
def __iter__(self):
"""Iterate through the link flair templates.
Example:
.. code-block:: python
for template in reddit.subreddit('NAME').flair.link_templates:
print(template)
"""
url = API_PATH["link_flair"].format(subreddit=self.subreddit)
for template in self.subreddit._reddit.get(url):
yield template
def add(
self,
text,
css_class="",
text_editable=False,
background_color=None,
text_color=None,
mod_only=None,
):
"""Add a link flair template to the associated subreddit.
:param text: The flair template's text (required).
:param css_class: The flair template's css_class (default: '').
Cannot be used in conjunction with ``background_color``,
``text_color``, or ``mod_only``.
:param text_editable: (boolean) Indicate if the flair text can be
modified for each Redditor that sets it (default: False).
:param background_color: The flair template's new background color,
as a hex color. Cannot be used in conjunction with ``css_class``.
:param text_color: The flair template's new text color, either
``'light'`` or ``'dark'``. Cannot be used in conjunction with
``css_class``.
:param mod_only: (boolean) Indicate if the flair can only be used by
moderators. Cannot be used in conjunction with ``css_class``.
For example, to add an editable link flair try:
.. code-block:: python
reddit.subreddit('NAME').flair.link_templates.add(
css_class='praw', text_editable=True)
"""
self._add(
text,
css_class=css_class,
text_editable=text_editable,
is_link=True,
background_color=background_color,
text_color=text_color,
mod_only=mod_only,
)
def clear(self):
"""Remove all link flair templates from the subreddit.
For example:
.. code-block:: python
reddit.subreddit('NAME').flair.link_templates.clear()
"""
self._clear(is_link=True)
class SubredditModeration(object):
"""Provides a set of moderation functions to a Subreddit."""
@staticmethod
def _handle_only(only, generator_kwargs):
if only is not None:
if only == "submissions":
only = "links"
RedditBase._safely_add_arguments(
generator_kwargs, "params", only=only
)
def __init__(self, subreddit):
"""Create a SubredditModeration instance.
:param subreddit: The subreddit to moderate.
"""
self.subreddit = subreddit
def accept_invite(self):
"""Accept an invitation as a moderator of the community."""
url = API_PATH["accept_mod_invite"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def edited(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for edited comments and submissions.
        :param only: If specified, one of ``'comments'`` or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print all items in the edited queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.edited(limit=None):
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_edited"].format(subreddit=self.subreddit),
**generator_kwargs
)
def inbox(self, **generator_kwargs):
"""Return a ListingGenerator for moderator messages.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
See ``unread`` for unread moderator messages.
To print the last 5 moderator mail messages and their replies, try:
.. code:: python
for message in reddit.subreddit('mod').mod.inbox(limit=5):
print("From: {}, Body: {}".format(message.author, message.body))
for reply in message.replies:
print("From: {}, Body: {}".format(reply.author, reply.body))
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["moderator_messages"].format(subreddit=self.subreddit),
**generator_kwargs
)
def log(self, action=None, mod=None, **generator_kwargs):
"""Return a ListingGenerator for moderator log entries.
:param action: If given, only return log entries for the specified
action.
:param mod: If given, only return log entries for actions made by the
passed in Redditor.
To print the moderator and subreddit of the last 5 modlog entries try:
.. code:: python
for log in reddit.subreddit('mod').mod.log(limit=5):
print("Mod: {}, Subreddit: {}".format(log.mod, log.subreddit))
"""
params = {"mod": str(mod) if mod else mod, "type": action}
Subreddit._safely_add_arguments(generator_kwargs, "params", **params)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_log"].format(subreddit=self.subreddit),
**generator_kwargs
)
def modqueue(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for comments/submissions in the modqueue.
        :param only: If specified, one of ``'comments'`` or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print all modqueue items try:
.. code:: python
for item in reddit.subreddit('mod').mod.modqueue(limit=None):
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_modqueue"].format(subreddit=self.subreddit),
**generator_kwargs
)
def reports(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for reported comments and submissions.
        :param only: If specified, one of ``'comments'`` or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the user and mod report reasons in the report queue try:
.. code:: python
for reported_item in reddit.subreddit('mod').mod.reports():
print("User Reports: {}".format(reported_item.user_reports))
print("Mod Reports: {}".format(reported_item.mod_reports))
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_reports"].format(subreddit=self.subreddit),
**generator_kwargs
)
def settings(self):
"""Return a dictionary of the subreddit's current settings."""
url = API_PATH["subreddit_settings"].format(subreddit=self.subreddit)
return self.subreddit._reddit.get(url)["data"]
def spam(self, only=None, **generator_kwargs):
"""Return a ListingGenerator for spam comments and submissions.
        :param only: If specified, one of ``'comments'`` or ``'submissions'``
to yield only results of that type.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the items in the spam queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.spam():
print(item)
"""
self._handle_only(only, generator_kwargs)
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_spam"].format(subreddit=self.subreddit),
**generator_kwargs
)
def unmoderated(self, **generator_kwargs):
"""Return a ListingGenerator for unmoderated submissions.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To print the items in the unmoderated queue try:
.. code:: python
for item in reddit.subreddit('mod').mod.unmoderated():
print(item)
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["about_unmoderated"].format(subreddit=self.subreddit),
**generator_kwargs
)
def unread(self, **generator_kwargs):
"""Return a ListingGenerator for unread moderator messages.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
See ``inbox`` for all messages.
To print the mail in the unread modmail queue try:
.. code:: python
for message in reddit.subreddit('mod').mod.unread():
print("From: {}, To: {}".format(message.author, message.dest))
"""
return ListingGenerator(
self.subreddit._reddit,
API_PATH["moderator_unread"].format(subreddit=self.subreddit),
**generator_kwargs
)
def update(self, **settings):
"""Update the subreddit's settings.
:param allow_images: Allow users to upload images using the native
image hosting. Only applies to link-only subreddits.
:param allow_post_crossposts: Allow users to crosspost submissions from
other subreddits.
:param allow_top: Allow the subreddit to appear on ``/r/all`` as well
as the default and trending lists.
:param collapse_deleted_comments: Collapse deleted and removed comments
on comments pages by default.
:param comment_score_hide_mins: The number of minutes to hide comment
scores.
:param description: Shown in the sidebar of your subreddit.
:param disable_contributor_requests: Specifies whether redditors may
send automated modmail messages requesting approval as a submitter.
:type disable_contributor_requests: bool
:param domain: Domain name with a cname that points to
{subreddit}.reddit.com.
:param exclude_banned_modqueue: Exclude posts by site-wide banned users
from modqueue/unmoderated.
:param header_hover_text: The text seen when hovering over the snoo.
:param hide_ads: Don't show ads within this subreddit. Only applies to
gold-user only subreddits.
:param key_color: A 6-digit rgb hex color (e.g. ``'#AABBCC'``), used as
a thematic color for your subreddit on mobile.
:param lang: A valid IETF language tag (underscore separated).
:param link_type: The types of submissions users can make.
One of ``any``, ``link``, ``self``.
:param over_18: Viewers must be over 18 years old (i.e. NSFW).
:param public_description: Public description blurb. Appears in search
results and on the landing page for private subreddits.
:param public_traffic: Make the traffic stats page public.
:param restrict_commenting: Specifies whether approved users have the
ability to comment.
:type restrict_commenting: bool
:param restrict_posting: Specifies whether approved users have the
ability to submit posts.
:type restrict_posting: bool
:param show_media: Show thumbnails on submissions.
:param show_media_preview: Expand media previews on comments pages.
:param spam_comments: Spam filter strength for comments.
One of ``all``, ``low``, ``high``.
:param spam_links: Spam filter strength for links.
One of ``all``, ``low``, ``high``.
:param spam_selfposts: Spam filter strength for selfposts.
One of ``all``, ``low``, ``high``.
:param spoilers_enabled: Enable marking posts as containing spoilers.
:param sr: The fullname of the subreddit whose settings will be
updated.
:param submit_link_label: Custom label for submit link button
(None for default).
:param submit_text: Text to show on submission page.
:param submit_text_label: Custom label for submit text post button
(None for default).
:param subreddit_type: One of ``archived``, ``employees_only``,
``gold_only``, ``gold_restricted``, ``private``, ``public``,
``restricted``.
:param suggested_comment_sort: All comment threads will use this
sorting method by default. Leave None, or choose one of
``confidence``, ``controversial``, ``new``, ``old``, ``qa``,
``random``, ``top``.
:param title: The title of the subreddit.
:param wiki_edit_age: Account age, in days, required to edit and create
wiki pages.
:param wiki_edit_karma: Subreddit karma required to edit and create
wiki pages.
:param wikimode: One of ``anyone``, ``disabled``, ``modonly``.
Additional keyword arguments can be provided to handle new settings as
Reddit introduces them.
Settings that are documented here and aren't explicitly set by you in a
call to :meth:`.SubredditModeration.update` should retain their current
        value. If they do not, please file a bug.
.. warning:: Undocumented settings, or settings that were very recently
documented, may not retain their current value when
updating. This often occurs when Reddit adds a new setting
but forgets to add that setting to the API endpoint that
is used to fetch the current settings.
"""
current_settings = self.settings()
fullname = current_settings.pop("subreddit_id")
# These attributes come out using different names than they go in.
remap = {
"allow_top": "default_set",
"lang": "language",
"link_type": "content_options",
}
for (new, old) in remap.items():
current_settings[new] = current_settings.pop(old)
current_settings.update(settings)
return Subreddit._create_or_update(
_reddit=self.subreddit._reddit, sr=fullname, **current_settings
)
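# Sketch, not part of the original file: because ``update`` above merges the
# current settings into the provided keyword arguments before posting, a
# partial update only needs the fields that change. The subreddit name is a
# hypothetical placeholder and this helper is never invoked by the library.
def _example_partial_settings_update(reddit):
    subreddit = reddit.subreddit('test')
    subreddit.mod.update(spam_links='all', over_18=True)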
class SubredditQuarantine(object):
"""Provides subreddit quarantine related methods."""
def __init__(self, subreddit):
"""Create a SubredditQuarantine instance.
:param subreddit: The subreddit associated with the quarantine.
"""
self.subreddit = subreddit
def opt_in(self):
"""Permit your user access to the quarantined subreddit.
Usage:
.. code:: python
subreddit = reddit.subreddit('QUESTIONABLE')
next(subreddit.hot()) # Raises prawcore.Forbidden
subreddit.quaran.opt_in()
next(subreddit.hot()) # Returns Submission
"""
data = {"sr_name": self.subreddit}
try:
self.subreddit._reddit.post(
API_PATH["quarantine_opt_in"], data=data
)
except Redirect:
pass
def opt_out(self):
"""Remove access to the quarantined subreddit.
Usage:
.. code:: python
subreddit = reddit.subreddit('QUESTIONABLE')
next(subreddit.hot()) # Returns Submission
subreddit.quaran.opt_out()
next(subreddit.hot()) # Raises prawcore.Forbidden
"""
data = {"sr_name": self.subreddit}
try:
self.subreddit._reddit.post(
API_PATH["quarantine_opt_out"], data=data
)
except Redirect:
pass
class SubredditRelationship(object):
"""Represents a relationship between a redditor and subreddit.
Instances of this class can be iterated through in order to discover the
Redditors that make up the relationship.
For example, banned users of a subreddit can be iterated through like so:
.. code-block:: python
for ban in reddit.subreddit('redditdev').banned():
print('{}: {}'.format(ban, ban.note))
"""
def __call__(self, redditor=None, **generator_kwargs):
"""Return a generator for Redditors belonging to this relationship.
:param redditor: When provided, yield at most a single
:class:`~.Redditor` instance. This is useful to confirm if a
relationship exists, or to fetch the metadata associated with a
particular relationship (default: None).
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
"""
Subreddit._safely_add_arguments(
generator_kwargs, "params", user=redditor
)
url = API_PATH["list_{}".format(self.relationship)].format(
subreddit=self.subreddit
)
return ListingGenerator(
self.subreddit._reddit, url, **generator_kwargs
)
def __init__(self, subreddit, relationship):
"""Create a SubredditRelationship instance.
:param subreddit: The subreddit for the relationship.
:param relationship: The name of the relationship.
"""
self.relationship = relationship
self.subreddit = subreddit
def add(self, redditor, **other_settings):
"""Add ``redditor`` to this relationship.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
"""
data = {"name": str(redditor), "type": self.relationship}
data.update(other_settings)
url = API_PATH["friend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def remove(self, redditor):
"""Remove ``redditor`` from this relationship.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
"""
data = {"name": str(redditor), "type": self.relationship}
url = API_PATH["unfriend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
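# Sketch, not part of the original file: ``add`` forwards extra keyword
# arguments to the ``friend`` endpoint, so relationship-specific fields can
# be passed through. The field names below follow Reddit's ban API and are
# assumptions, not guarantees; the redditor name is a placeholder.
def _example_ban_with_note(subreddit):
    subreddit.banned.add('spez', ban_reason='demo', note='example ban',
                         duration=3)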
class ContributorRelationship(SubredditRelationship):
"""Provides methods to interact with a Subreddit's contributors.
Contributors are also known as approved submitters.
Contributors of a subreddit can be iterated through like so:
.. code-block:: python
for contributor in reddit.subreddit('redditdev').contributor():
print(contributor)
"""
def leave(self):
"""Abdicate the contributor position."""
self.subreddit._reddit.post(
API_PATH["leavecontributor"], data={"id": self.subreddit.fullname}
)
class ModeratorRelationship(SubredditRelationship):
"""Provides methods to interact with a Subreddit's moderators.
Moderators of a subreddit can be iterated through like so:
.. code-block:: python
for moderator in reddit.subreddit('redditdev').moderator():
print(moderator)
"""
PERMISSIONS = {"access", "config", "flair", "mail", "posts", "wiki"}
@staticmethod
def _handle_permissions(permissions, other_settings):
other_settings = deepcopy(other_settings) if other_settings else {}
other_settings["permissions"] = permissions_string(
permissions, ModeratorRelationship.PERMISSIONS
)
return other_settings
def __call__(self, redditor=None): # pylint: disable=arguments-differ
"""Return a list of Redditors who are moderators.
:param redditor: When provided, return a list containing at most one
:class:`~.Redditor` instance. This is useful to confirm if a
relationship exists, or to fetch the metadata associated with a
particular relationship (default: None).
.. note:: Unlike other relationship callables, this relationship is not
paginated. Thus it simply returns the full list, rather than
an iterator for the results.
To be used like:
.. code:: python
moderators = reddit.subreddit('nameofsub').moderator()
For example, to list the moderators along with their permissions try:
.. code:: python
for moderator in reddit.subreddit('SUBREDDIT').moderator():
print('{}: {}'.format(moderator, moderator.mod_permissions))
"""
params = {} if redditor is None else {"user": redditor}
url = API_PATH["list_{}".format(self.relationship)].format(
subreddit=self.subreddit
)
return self.subreddit._reddit.get(url, params=params)
# pylint: disable=arguments-differ
def add(self, redditor, permissions=None, **other_settings):
"""Add or invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
grant. An empty list ``[]`` indicates no permissions, and when not
provided ``None``, indicates full permissions.
An invite will be sent unless the user making this call is an admin
user.
For example, to invite ``'spez'`` with ``'posts'`` and ``'mail'``
        permissions to ``'/r/test'``, try:
.. code:: python
reddit.subreddit('test').moderator.add('spez', ['posts', 'mail'])
"""
other_settings = self._handle_permissions(permissions, other_settings)
super(ModeratorRelationship, self).add(redditor, **other_settings)
# pylint: enable=arguments-differ
def invite(self, redditor, permissions=None, **other_settings):
"""Invite ``redditor`` to be a moderator of the subreddit.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
grant. An empty list ``[]`` indicates no permissions, and when not
provided ``None``, indicates full permissions.
For example, to invite ``'spez'`` with ``'posts'`` and ``'mail'``
        permissions to ``'/r/test'``, try:
.. code:: python
reddit.subreddit('test').moderator.invite('spez', ['posts', 'mail'])
"""
data = self._handle_permissions(permissions, other_settings)
data.update({"name": str(redditor), "type": "moderator_invite"})
url = API_PATH["friend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def leave(self):
"""Abdicate the moderator position (use with care).
Example:
.. code:: python
reddit.subreddit('subredditname').moderator.leave()
"""
self.remove(self.subreddit._reddit.config.username)
def remove_invite(self, redditor):
"""Remove the moderator invite for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
Example:
.. code:: python
reddit.subreddit('subredditname').moderator.remove_invite('spez')
"""
data = {"name": str(redditor), "type": "moderator_invite"}
url = API_PATH["unfriend"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def update(self, redditor, permissions=None):
"""Update the moderator permissions for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
grant. An empty list ``[]`` indicates no permissions, and when not
provided, ``None``, indicates full permissions.
For example, to add all permissions to the moderator, try:
.. code:: python
subreddit.moderator.update('spez')
To remove all permissions from the moderator, try:
.. code:: python
subreddit.moderator.update('spez', [])
"""
url = API_PATH["setpermissions"].format(subreddit=self.subreddit)
data = self._handle_permissions(
permissions, {"name": str(redditor), "type": "moderator"}
)
self.subreddit._reddit.post(url, data=data)
def update_invite(self, redditor, permissions=None):
"""Update the moderator invite permissions for ``redditor``.
:param redditor: A redditor name (e.g., ``'spez'``) or
:class:`~.Redditor` instance.
:param permissions: When provided (not ``None``), permissions should be
a list of strings specifying which subset of permissions to
grant. An empty list ``[]`` indicates no permissions, and when not
provided, ``None``, indicates full permissions.
For example, to grant the flair and mail permissions to the moderator
invite, try:
.. code:: python
subreddit.moderator.update_invite('spez', ['flair', 'mail'])
"""
url = API_PATH["setpermissions"].format(subreddit=self.subreddit)
data = self._handle_permissions(
permissions, {"name": str(redditor), "type": "moderator_invite"}
)
self.subreddit._reddit.post(url, data=data)
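# Sketch, not part of the original file: per the docstrings above, ``None``
# means full permissions and ``[]`` means none. The names are placeholders
# and this helper is never invoked by the library.
def _example_moderator_permissions(subreddit):
    subreddit.moderator.invite('spez', permissions=['flair', 'mail'])
    subreddit.moderator.update_invite('spez', permissions=[])  # revoke all
    subreddit.moderator.update('spez')  # None -> full permissions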
class Modmail(object):
"""Provides modmail functions for a subreddit."""
def __call__(self, id=None, mark_read=False): # noqa: D207, D301
"""Return an individual conversation.
:param id: A reddit base36 conversation ID, e.g., ``2gmz``.
:param mark_read: If True, conversation is marked as read
(default: False).
Example:
.. code:: python
reddit.subreddit('redditdev').modmail('2gmz', mark_read=True)
To print all messages from a conversation as Markdown source:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
for message in conversation.messages:
print(message.body_markdown)
``ModmailConversation.user`` is a special instance of
:class:`.Redditor` with extra attributes describing the non-moderator
user's recent posts, comments, and modmail messages within the
subreddit, as well as information on active bans and mutes. This
attribute does not exist on internal moderator discussions.
For example, to print the user's ban status:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
print(conversation.user.ban_status)
To print a list of recent submissions by the user:
.. code:: python
conversation = reddit.subreddit('redditdev').modmail('2gmz', \
mark_read=True)
print(conversation.user.recent_posts)
"""
# pylint: disable=invalid-name,redefined-builtin
return ModmailConversation(
self.subreddit._reddit, id=id, mark_read=mark_read
)
def __init__(self, subreddit):
"""Construct an instance of the Modmail object."""
self.subreddit = subreddit
def _build_subreddit_list(self, other_subreddits):
"""Return a comma-separated list of subreddit display names."""
subreddits = [self.subreddit] + (other_subreddits or [])
return ",".join(str(subreddit) for subreddit in subreddits)
def bulk_read(self, other_subreddits=None, state=None):
"""Mark conversations for subreddit(s) as read.
Due to server-side restrictions, 'all' is not a valid subreddit for
this method. Instead, use :meth:`~.Modmail.subreddits` to get a list of
subreddits using the new modmail.
:param other_subreddits: A list of :class:`.Subreddit` instances for
which to mark conversations (default: None).
:param state: Can be one of: all, archived, highlighted, inprogress,
            mod, new, notifications (default: all). "all" does not include
internal or archived conversations.
:returns: A list of :class:`.ModmailConversation` instances that were
marked read.
For example, to mark all notifications for a subreddit as read:
.. code:: python
subreddit = reddit.subreddit('redditdev')
subreddit.modmail.bulk_read(state='notifications')
"""
params = {"entity": self._build_subreddit_list(other_subreddits)}
if state:
params["state"] = state
response = self.subreddit._reddit.post(
API_PATH["modmail_bulk_read"], params=params
)
return [
self(conversation_id)
for conversation_id in response["conversation_ids"]
]
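    # Illustrative note, not in the original source: ``bulk_read`` pairs
    # naturally with ``unread_count`` defined later in this class, e.g.::
    #
    #     counts = subreddit.modmail.unread_count()
    #     if counts.get('notifications'):
    #         subreddit.modmail.bulk_read(state='notifications')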
def conversations(
self,
after=None,
limit=None,
other_subreddits=None,
sort=None,
state=None,
): # noqa: D207, D301
"""Generate :class:`.ModmailConversation` objects for subreddit(s).
:param after: A base36 modmail conversation id. When provided, the
listing begins after this conversation (default: None).
:param limit: The maximum number of conversations to fetch. If None,
the server-side default is 25 at the time of writing
(default: None).
:param other_subreddits: A list of :class:`.Subreddit` instances for
which to fetch conversations (default: None).
:param sort: Can be one of: mod, recent, unread, user
(default: recent).
:param state: Can be one of: all, archived, highlighted, inprogress,
            mod, new, notifications (default: all). "all" does not include
internal or archived conversations.
Example:
.. code:: python
conversations = reddit.subreddit('all').modmail.conversations(\
state='mod')
"""
params = {}
if self.subreddit != "all":
params["entity"] = self._build_subreddit_list(other_subreddits)
for name, value in {
"after": after,
"limit": limit,
"sort": sort,
"state": state,
}.items():
if value:
params[name] = value
response = self.subreddit._reddit.get(
API_PATH["modmail_conversations"], params=params
)
for conversation_id in response["conversationIds"]:
data = {
"conversation": response["conversations"][conversation_id],
"messages": response["messages"],
}
yield ModmailConversation.parse(
data, self.subreddit._reddit, convert_objects=False
)
def create(self, subject, body, recipient, author_hidden=False):
"""Create a new modmail conversation.
:param subject: The message subject. Cannot be empty.
:param body: The message body. Cannot be empty.
:param recipient: The recipient; a username or an instance of
:class:`.Redditor`.
:param author_hidden: When True, author is hidden from non-moderators
(default: False).
:returns: A :class:`.ModmailConversation` object for the newly created
conversation.
.. code:: python
subreddit = reddit.subreddit('redditdev')
redditor = reddit.redditor('bboe')
subreddit.modmail.create('Subject', 'Body', redditor)
"""
data = {
"body": body,
"isAuthorHidden": author_hidden,
"srName": self.subreddit,
"subject": subject,
"to": recipient,
}
return self.subreddit._reddit.post(
API_PATH["modmail_conversations"], data=data
)
def subreddits(self):
"""Yield subreddits using the new modmail that the user moderates.
Example:
.. code:: python
subreddits = reddit.subreddit('all').modmail.subreddits()
"""
response = self.subreddit._reddit.get(API_PATH["modmail_subreddits"])
for value in response["subreddits"].values():
subreddit = self.subreddit._reddit.subreddit(value["display_name"])
subreddit.last_updated = value["lastUpdated"]
yield subreddit
def unread_count(self):
"""Return unread conversation count by conversation state.
At time of writing, possible states are: archived, highlighted,
inprogress, mod, new, notifications.
:returns: A dict mapping conversation states to unread counts.
For example, to print the count of unread moderator discussions:
.. code:: python
subreddit = reddit.subreddit('redditdev')
unread_counts = subreddit.modmail.unread_count()
print(unread_counts['mod'])
"""
return self.subreddit._reddit.get(API_PATH["modmail_unread_count"])
class SubredditStream(object):
"""Provides submission and comment streams."""
def __init__(self, subreddit):
"""Create a SubredditStream instance.
:param subreddit: The subreddit associated with the streams.
"""
self.subreddit = subreddit
def comments(self, **stream_options):
"""Yield new comments as they become available.
Comments are yielded oldest first. Up to 100 historical comments will
initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
For example, to retrieve all new comments made to the ``iama``
subreddit, try:
.. code:: python
for comment in reddit.subreddit('iama').stream.comments():
print(comment)
        To only retrieve new comments starting when the stream is
        created, pass ``skip_existing=True``:
.. code:: python
subreddit = reddit.subreddit('iama')
for comment in subreddit.stream.comments(skip_existing=True):
print(comment)
"""
return stream_generator(self.subreddit.comments, **stream_options)
def submissions(self, **stream_options):
"""Yield new submissions as they become available.
Submissions are yielded oldest first. Up to 100 historical submissions
will initially be returned.
Keyword arguments are passed to :func:`.stream_generator`.
        For example, to retrieve all new submissions made to all of Reddit, try:
.. code:: python
for submission in reddit.subreddit('all').stream.submissions():
print(submission)
"""
return stream_generator(self.subreddit.new, **stream_options)
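# Sketch, not part of the original file: both streams are lazy generators,
# so a long-running consumer typically wraps them to survive transient
# network errors. The caught exception type and sleep interval are
# illustrative assumptions, not PRAW requirements.
def _example_resilient_comment_stream(subreddit):
    import time
    while True:
        try:
            for comment in subreddit.stream.comments(skip_existing=True):
                print(comment.body)
        except Exception:  # e.g. a prawcore request exception
            time.sleep(30)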
class SubredditStylesheet(object):
"""Provides a set of stylesheet functions to a Subreddit."""
def __call__(self):
"""Return the subreddit's stylesheet.
To be used as:
.. code:: python
stylesheet = reddit.subreddit('SUBREDDIT').stylesheet()
"""
url = API_PATH["about_stylesheet"].format(subreddit=self.subreddit)
return self.subreddit._reddit.get(url)
def __init__(self, subreddit):
"""Create a SubredditStylesheet instance.
:param subreddit: The subreddit associated with the stylesheet.
An instance of this class is provided as:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet
"""
self.subreddit = subreddit
def _update_structured_styles(self, style_data):
url = API_PATH["structured_styles"].format(subreddit=self.subreddit)
self.subreddit._reddit.patch(url, style_data)
def _upload_image(self, image_path, data):
with open(image_path, "rb") as image:
header = image.read(len(JPEG_HEADER))
image.seek(0)
data["img_type"] = "jpg" if header == JPEG_HEADER else "png"
url = API_PATH["upload_image"].format(subreddit=self.subreddit)
response = self.subreddit._reddit.post(
url, data=data, files={"file": image}
)
if response["errors"]:
error_type = response["errors"][0]
error_value = response.get("errors_values", [""])[0]
assert error_type in [
"BAD_CSS_NAME",
"IMAGE_ERROR",
], "Please file a bug with PRAW"
raise APIException(error_type, error_value, None)
return response
def _upload_style_asset(self, image_path, image_type):
data = {"imagetype": image_type, "filepath": basename(image_path)}
data["mimetype"] = "image/jpeg"
if image_path.lower().endswith(".png"):
data["mimetype"] = "image/png"
url = API_PATH["style_asset_lease"].format(subreddit=self.subreddit)
upload_lease = self.subreddit._reddit.post(url, data=data)[
"s3UploadLease"
]
upload_data = {
item["name"]: item["value"] for item in upload_lease["fields"]
}
upload_url = "https:{}".format(upload_lease["action"])
with open(image_path, "rb") as image:
response = self.subreddit._reddit._core._requestor._http.post(
upload_url, data=upload_data, files={"file": image}
)
response.raise_for_status()
return "{}/{}".format(upload_url, upload_data["key"])
def delete_banner(self):
"""Remove the current subreddit (redesign) banner image.
Succeeds even if there is no banner image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner()
"""
data = {"bannerBackgroundImage": ""}
self._update_structured_styles(data)
def delete_banner_additional_image(self):
"""Remove the current subreddit (redesign) banner additional image.
Succeeds even if there is no additional image. Will also delete any
configured hover image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner_additional_image()
"""
data = {
"bannerPositionedImage": "",
"secondaryBannerPositionedImage": "",
}
self._update_structured_styles(data)
def delete_banner_hover_image(self):
"""Remove the current subreddit (redesign) banner hover image.
Succeeds even if there is no hover image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_banner_hover_image()
"""
data = {"secondaryBannerPositionedImage": ""}
self._update_structured_styles(data)
def delete_header(self):
"""Remove the current subreddit header image.
Succeeds even if there is no header image.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_header()
"""
url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def delete_image(self, name):
"""Remove the named image from the subreddit.
Succeeds even if the named image does not exist.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_image('smile')
"""
url = API_PATH["delete_sr_image"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data={"img_name": name})
def delete_mobile_header(self):
"""Remove the current subreddit mobile header.
Succeeds even if there is no mobile header.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_mobile_header()
"""
url = API_PATH["delete_sr_header"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def delete_mobile_icon(self):
"""Remove the current subreddit mobile icon.
Succeeds even if there is no mobile icon.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.delete_mobile_icon()
"""
url = API_PATH["delete_sr_icon"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url)
def update(self, stylesheet, reason=None):
"""Update the subreddit's stylesheet.
        :param stylesheet: The CSS for the new stylesheet.
        :param reason: The reason for the change (default: None).
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.update(
'p { color: green; }', 'color text green')
"""
data = {
"op": "save",
"reason": reason,
"stylesheet_contents": stylesheet,
}
url = API_PATH["subreddit_stylesheet"].format(subreddit=self.subreddit)
self.subreddit._reddit.post(url, data=data)
def upload(self, name, image_path):
"""Upload an image to the Subreddit.
:param name: The name to use for the image. If an image already exists
with the same name, it will be replaced.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload('smile', 'img.png')
"""
return self._upload_image(
image_path, {"name": name, "upload_type": "img"}
)
def upload_banner(self, image_path):
"""Upload an image for the subreddit's (redesign) banner image.
:param image_path: A path to a jpeg or png image.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner('banner.png')
"""
image_type = "bannerBackgroundImage"
image_url = self._upload_style_asset(image_path, image_type)
self._update_structured_styles({image_type: image_url})
def upload_banner_additional_image(self, image_path, align=None):
"""Upload an image for the subreddit's (redesign) additional image.
:param image_path: A path to a jpeg or png image.
:param align: Either ``left``, ``centered``, or ``right``. (default:
``left``).
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner_additional_image('banner.png')
"""
alignment = {}
if align is not None:
if align not in {"left", "centered", "right"}:
raise ValueError(
"align argument must be either "
"`left`, `centered`, or `right`"
)
alignment["bannerPositionedImagePosition"] = align
image_type = "bannerPositionedImage"
image_url = self._upload_style_asset(image_path, image_type)
style_data = {image_type: image_url}
if alignment:
style_data.update(alignment)
self._update_structured_styles(style_data)
def upload_banner_hover_image(self, image_path):
"""Upload an image for the subreddit's (redesign) additional image.
:param image_path: A path to a jpeg or png image.
        Fails if the subreddit does not have an additional image defined.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_banner_hover_image('banner.png')
"""
image_type = "secondaryBannerPositionedImage"
image_url = self._upload_style_asset(image_path, image_type)
self._update_structured_styles({image_type: image_url})
def upload_header(self, image_path):
"""Upload an image to be used as the Subreddit's header image.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
Example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_header('header.png')
"""
return self._upload_image(image_path, {"upload_type": "header"})
def upload_mobile_header(self, image_path):
"""Upload an image to be used as the Subreddit's mobile header.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
For example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_mobile_header(
'header.png')
"""
return self._upload_image(image_path, {"upload_type": "banner"})
def upload_mobile_icon(self, image_path):
"""Upload an image to be used as the Subreddit's mobile icon.
:param image_path: A path to a jpeg or png image.
:returns: A dictionary containing a link to the uploaded image under
the key ``img_src``.
Raises ``prawcore.TooLarge`` if the overall request body is too large.
Raises :class:`.APIException` if there are other issues with the
uploaded image. Unfortunately the exception info might not be very
specific, so try through the website with the same image to see what
the problem actually might be.
For example:
.. code:: python
reddit.subreddit('SUBREDDIT').stylesheet.upload_mobile_icon(
'icon.png')
"""
return self._upload_image(image_path, {"upload_type": "icon"})
class SubredditWiki(object):
"""Provides a set of moderation functions to a Subreddit."""
def __getitem__(self, page_name):
"""Lazily return the WikiPage for the subreddit named ``page_name``.
This method is to be used to fetch a specific wikipage, like so:
.. code:: python
wikipage = reddit.subreddit('iama').wiki['proof']
print(wikipage.content_md)
"""
return WikiPage(
self.subreddit._reddit, self.subreddit, page_name.lower()
)
def __init__(self, subreddit):
"""Create a SubredditModeration instance.
:param subreddit: The subreddit to moderate.
"""
self.banned = SubredditRelationship(subreddit, "wikibanned")
self.contributor = SubredditRelationship(subreddit, "wikicontributor")
self.subreddit = subreddit
def __iter__(self):
"""Iterate through the pages of the wiki.
This method is to be used to discover all wikipages for a subreddit:
.. code:: python
for wikipage in reddit.subreddit('iama').wiki:
print(wikipage)
"""
response = self.subreddit._reddit.get(
API_PATH["wiki_pages"].format(subreddit=self.subreddit),
params={"unique": self.subreddit._reddit._next_unique},
)
for page_name in response["data"]:
yield WikiPage(self.subreddit._reddit, self.subreddit, page_name)
def create(self, name, content, reason=None, **other_settings):
"""Create a new wiki page.
:param name: The name of the new WikiPage. This name will be
normalized.
:param content: The content of the new WikiPage.
:param reason: (Optional) The reason for the creation.
:param other_settings: Additional keyword arguments to pass.
To create the wiki page ``'praw_test'`` in ``'/r/test'`` try:
.. code:: python
reddit.subreddit('test').wiki.create(
'praw_test', 'wiki body text', reason='PRAW Test Creation')
"""
name = name.replace(" ", "_").lower()
new = WikiPage(self.subreddit._reddit, self.subreddit, name)
new.edit(content=content, reason=reason, **other_settings)
return new
def revisions(self, **generator_kwargs):
"""Return a generator for recent wiki revisions.
Additional keyword arguments are passed in the initialization of
:class:`.ListingGenerator`.
To view the wiki revisions for ``'praw_test'`` in ``'/r/test'`` try:
.. code:: python
for item in reddit.subreddit('test').wiki['praw_test'].revisions():
print(item)
"""
url = API_PATH["wiki_revisions"].format(subreddit=self.subreddit)
return WikiPage._revision_generator(
self.subreddit, url, generator_kwargs
)
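# Sketch, not part of the original file: ``create`` above lowercases the
# page name and replaces spaces with underscores, so the new page can be
# fetched back through ``__getitem__`` under the normalized name. The page
# names are hypothetical and this helper is never invoked by the library.
def _example_wiki_roundtrip(subreddit):
    subreddit.wiki.create('Praw Test', 'wiki body text', reason='demo')
    page = subreddit.wiki['praw_test']  # same page, normalized name
    return page.content_md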
| bsd-2-clause | -7,310,375,806,501,327,000 | 33.792395 | 96 | 0.59653 | false |
infoxchange/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/urlpatterns_reverse/namespace_urls.py | 51 | 1463 | from django.conf.urls.defaults import *
class URLObject(object):
def __init__(self, app_name, namespace):
self.app_name = app_name
self.namespace = namespace
def urls(self):
return patterns('',
url(r'^inner/$', 'empty_view', name='urlobject-view'),
url(r'^inner/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='urlobject-view'),
), self.app_name, self.namespace
urls = property(urls)
testobj1 = URLObject('testapp', 'test-ns1')
testobj2 = URLObject('testapp', 'test-ns2')
default_testobj = URLObject('testapp', 'testapp')
otherobj1 = URLObject('nodefault', 'other-ns1')
otherobj2 = URLObject('nodefault', 'other-ns2')
urlpatterns = patterns('regressiontests.urlpatterns_reverse.views',
url(r'^normal/$', 'empty_view', name='normal-view'),
url(r'^normal/(?P<arg1>\d+)/(?P<arg2>\d+)/$', 'empty_view', name='normal-view'),
(r'^test1/', include(testobj1.urls)),
(r'^test2/', include(testobj2.urls)),
(r'^default/', include(default_testobj.urls)),
(r'^other1/', include(otherobj1.urls)),
(r'^other2/', include(otherobj2.urls)),
(r'^ns-included1/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns1')),
(r'^ns-included2/', include('regressiontests.urlpatterns_reverse.included_namespace_urls', namespace='inc-ns2')),
(r'^included/', include('regressiontests.urlpatterns_reverse.included_namespace_urls')),
)
| gpl-3.0 | -5,292,288,999,358,233,000 | 37.5 | 117 | 0.651401 | false |
matsjoyce/python-krunner | src/krunner.py | 1 | 2880 | import _krunner
import abc
from PyQt5 import QtCore
__all__ = ["AbstractRunner", "QueryMatch", "RunnerContext", "RunnerSyntax"]
for name in __all__[1:]:
cls = getattr(_krunner.Plasma, name)
globals()[name] = cls
cls.__module__ = __name__
del name
del cls
class ARMeta(type(_krunner.Plasma.AbstractRunner), abc.ABCMeta):
pass
class AbstractRunner(_krunner.Plasma.AbstractRunner, metaclass=ARMeta):
def __init__(self, parent, args):
# Using parent() seems to remove the type
self._parent = parent
super().__init__(parent, args)
# Public
@abc.abstractmethod
def match(self, runnercontext):
pass
def hasRunOptions(self):
return self._parent.hasRunOptions()
def speed(self):
return self._parent.speed()
def priority(self):
return self._parent.priority()
def ignoredTypes(self):
return self._parent.ignoredTypes()
def setIgnoredTypes(self, types):
return self._parent.setIgnoredTypes(_krunner.Plasma.RunnerContext.Types(types))
def name(self):
return self._parent.name()
def id(self):
return self._parent.id()
def description(self):
return self._parent.description()
def icon(self):
return self._parent.icon()
def syntaxes(self):
return self._parent.syntaxes()
def defaultSyntax(self):
return self._parent.defaultSyntax()
def isMatchingSuspended(self):
return self._parent.isMatchingSuspended()
# Private
def suspendMatching(self, suspend):
return self._parent.suspendMatching(suspend)
def setHasRunOptions(self, hasRunOptions):
return self._parent.setHasRunOptions(hasRunOptions)
def setSpeed(self, newSpeed):
return self._parent.setSpeed(newSpeed)
def setPriority(self, newPriority):
return self._parent.setPriority(newPriority)
def addAction(self, id, icon_or_action, text=None):
if text is None:
return self._parent.addAction(id, icon_or_action)
else:
return self._parent.addAction(id, icon_or_action, text)
def removeAction(self, id):
return self._parent.removeAction(id)
def action(self, id):
return self._parent.action(id)
def actions(self):
return self._parent.actions()
def clearActions(self):
return self._parent.clearActions()
def addSyntax(self, syntax):
return self._parent.addSyntax(syntax)
def setDefaultSyntax(self, syntax):
return self._parent.setDefaultSyntax(syntax)
def setSyntaxes(self, syns):
return self._parent.setSyntaxes(syns)
def _except_hook(type, value, tb):
# Used by plasma_runner_python to stop qFatal being called by PyQt5
import traceback
print("Exception in runner:")
traceback.print_exception(type, value, tb)
| lgpl-3.0 | 1,380,575,226,892,235,500 | 24.263158 | 87 | 0.654514 | false |
e-mission/e-mission-server | emission/analysis/plotting/leaflet_osm/ipython_helper.py | 2 | 1415 | """
Helper functions that can display leaflet maps inline in an ipython notebook
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import *
import branca.element as bre
def inline_map(m):
"""
Embeds the HTML source of the map directly into the IPython notebook.
This method will not work if the map depends on any files (json data). Also this uses
the HTML5 srcdoc attribute, which may not be supported in all browsers.
"""
fig = bre.Figure()
fig.add_subplot(1,1,1).add_child(m)
return fig
def inline_maps(map_list):
"""
Embeds the HTML source of the map_list directly into the IPython notebook.
This method will not work if the map depends on any files (json data). Also this uses
the HTML5 srcdoc attribute, which may not be supported in all browsers.
    map_list: a flat list of maps, laid out left to right into a grid with
    ``ncols`` columns (currently fixed at 2) and as many rows as needed.
"""
ncols = 2
    nrows = (len(map_list) // ncols) + 1  # integer rows; true division is imported above
fig = bre.Figure()
for i, m in enumerate(map_list):
fig.add_subplot(nrows,ncols,i+1).add_child(m)
return fig
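def _example_usage():
    """Illustrative only, not part of the original module: build two maps
    and lay them out in the two-column grid. Assumes folium is installed;
    the coordinates are arbitrary.
    """
    import folium
    m1 = folium.Map(location=[37.87, -122.26])
    m2 = folium.Map(location=[37.77, -122.42])
    return inline_maps([m1, m2])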
| bsd-3-clause | 5,773,032,909,723,954,000 | 31.906977 | 113 | 0.703887 | false |
MakMukhi/grpc | src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause | 7,592,075,577,968,472,000 | 53.571429 | 72 | 0.782723 | false |
blisseth/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
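# Worked sketch, not part of the original file: QuickLeastSquares on a tiny
# synthetic dataset. With ys = 2 * xs + 1 exactly, the fit recovers
# inter == 1.0, slope == 2.0 and mse ~= 0 (up to float rounding).
def _example_quick_least_squares():
    xs = np.array([1.0, 2.0, 3.0, 4.0])
    ys = 2 * xs + 1
    return QuickLeastSquares(xs, ys)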
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
    df['isold'] = (df.agepreg>35).astype(int)
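    # map the delivery date code to a coarse 0-3 season/quarter index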
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
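    # base rate of boys, i.e. the accuracy of always guessing 'boy'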
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 | -2,269,424,247,804,909,600 | 24.135417 | 78 | 0.612516 | false |
joferkington/numpy | doc/postprocess.py | 143 | 1481 | #!/usr/bin/env python
"""
%prog MODE FILES...
Post-processes HTML and Latex files output by Sphinx.
MODE is either 'html' or 'tex'.
"""
from __future__ import division, absolute_import, print_function
import re
import optparse
import io
def main():
p = optparse.OptionParser(__doc__)
options, args = p.parse_args()
if len(args) < 1:
p.error('no mode given')
mode = args.pop(0)
if mode not in ('html', 'tex'):
p.error('unknown mode %s' % mode)
for fn in args:
f = io.open(fn, 'r', encoding="utf-8")
try:
if mode == 'html':
lines = process_html(fn, f.readlines())
elif mode == 'tex':
lines = process_tex(f.readlines())
finally:
f.close()
f = io.open(fn, 'w', encoding="utf-8")
f.write("".join(lines))
f.close()
def process_html(fn, lines):
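    # HTML output currently needs no post-processing; pass lines through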
return lines
def process_tex(lines):
"""
Remove unnecessary section titles from the LaTeX file.
"""
new_lines = []
for line in lines:
if (line.startswith(r'\section{numpy.')
or line.startswith(r'\subsection{numpy.')
or line.startswith(r'\subsubsection{numpy.')
or line.startswith(r'\paragraph{numpy.')
or line.startswith(r'\subparagraph{numpy.')
):
pass # skip!
else:
new_lines.append(line)
return new_lines
if __name__ == "__main__":
main()
| bsd-3-clause | -1,184,995,599,391,529,500 | 22.507937 | 64 | 0.545577 | false |
arne-cl/pattern | pattern/web/pdf/encodingdb.py | 56 | 1548 | #!/usr/bin/env python2
import re
from psparser import PSLiteral
from glyphlist import glyphname2unicode
from latin_enc import ENCODING
## name2unicode
##
STRIP_NAME = re.compile(r'[0-9]+')
def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m: raise KeyError(name)
return unichr(int(m.group(0)))
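# e.g. name2unicode('A') returns u'A' from the glyph list, while a name like
# 'g123' falls through to the digit-extraction branch and yields unichr(123).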
## EncodingDB
##
class EncodingDB(object):
std2unicode = {}
mac2unicode = {}
win2unicode = {}
pdf2unicode = {}
for (name,std,mac,win,pdf) in ENCODING:
c = name2unicode(name)
if std: std2unicode[std] = c
if mac: mac2unicode[mac] = c
if win: win2unicode[win] = c
if pdf: pdf2unicode[pdf] = c
encodings = {
'StandardEncoding': std2unicode,
'MacRomanEncoding': mac2unicode,
'WinAnsiEncoding': win2unicode,
'PDFDocEncoding': pdf2unicode,
}
@classmethod
def get_encoding(klass, name, diff=None):
cid2unicode = klass.encodings.get(name, klass.std2unicode)
if diff:
cid2unicode = cid2unicode.copy()
cid = 0
for x in diff:
if isinstance(x, int):
cid = x
elif isinstance(x, PSLiteral):
try:
cid2unicode[cid] = name2unicode(x.name)
except KeyError:
pass
cid += 1
return cid2unicode
| bsd-3-clause | 4,451,562,903,178,473,500 | 25.689655 | 66 | 0.574289 | false |
sekikn/incubator-airflow | airflow/providers/amazon/aws/operators/s3_bucket.py | 7 | 3993 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains AWS S3 operators."""
from typing import Optional
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.decorators import apply_defaults
class S3CreateBucketOperator(BaseOperator):
"""
This operator creates an S3 bucket
:param bucket_name: This is bucket name you want to create
:type bucket_name: str
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
:param region_name: AWS region_name. If not specified fetched from connection.
:type region_name: Optional[str]
"""
@apply_defaults
def __init__(
self,
*,
bucket_name: str,
aws_conn_id: Optional[str] = "aws_default",
region_name: Optional[str] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
def execute(self, context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
if not s3_hook.check_for_bucket(self.bucket_name):
s3_hook.create_bucket(bucket_name=self.bucket_name, region_name=self.region_name)
self.log.info("Created bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s already exists", self.bucket_name)
class S3DeleteBucketOperator(BaseOperator):
"""
This operator deletes an S3 bucket
:param bucket_name: This is bucket name you want to delete
:type bucket_name: str
:param force_delete: Forcibly delete all objects in the bucket before deleting the bucket
:type force_delete: bool
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:type aws_conn_id: Optional[str]
"""
def __init__(
self,
bucket_name: str,
force_delete: bool = False,
aws_conn_id: Optional[str] = "aws_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.force_delete = force_delete
self.aws_conn_id = aws_conn_id
def execute(self, context):
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
if s3_hook.check_for_bucket(self.bucket_name):
s3_hook.delete_bucket(bucket_name=self.bucket_name, force_delete=self.force_delete)
self.log.info("Deleted bucket with name: %s", self.bucket_name)
else:
self.log.info("Bucket with name: %s doesn't exist", self.bucket_name)
| apache-2.0 | 1,493,359,525,984,938,000 | 38.93 | 95 | 0.676434 | false |
roselleebarle04/django | tests/flatpages_tests/test_sitemaps.py | 380 | 1326 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.sites.models import Site
from django.test import TestCase
from django.test.utils import modify_settings, override_settings
@override_settings(
ROOT_URLCONF='flatpages_tests.urls',
SITE_ID=1,
)
@modify_settings(
INSTALLED_APPS={
'append': ['django.contrib.sitemaps', 'django.contrib.flatpages'],
},
)
class FlatpagesSitemapTests(TestCase):
@classmethod
def setUpClass(cls):
super(FlatpagesSitemapTests, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
@classmethod
def setUpTestData(cls):
Site = apps.get_model('sites.Site')
current_site = Site.objects.get_current()
current_site.flatpage_set.create(url="/foo/", title="foo")
current_site.flatpage_set.create(url="/private-foo/", title="private foo", registration_required=True)
def test_flatpage_sitemap(self):
response = self.client.get('/flatpages/sitemap.xml')
self.assertIn(b'<url><loc>http://example.com/foo/</loc></url>', response.getvalue())
self.assertNotIn(b'<url><loc>http://example.com/private-foo/</loc></url>', response.getvalue())
| bsd-3-clause | 7,512,587,546,436,622,000 | 34.837838 | 110 | 0.687783 | false |
blaggacao/odoo | addons/mail/tests/test_mail_group.py | 140 | 3964 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailGroup(TestMail):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_mail_group_access_rights(self):
""" Testing mail_group access rights and basic mail_thread features """
cr, uid, user_noone_id, user_employee_id = self.cr, self.uid, self.user_noone_id, self.user_employee_id
# Do: Bert reads Jobs -> ok, public
self.mail_group.read(cr, user_noone_id, [self.group_jobs_id])
# Do: Bert read Pigs -> ko, restricted to employees
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_noone_id, [self.group_pigs_id])
# Do: Raoul read Pigs -> ok, belong to employees
self.mail_group.read(cr, user_employee_id, [self.group_pigs_id])
# Do: Bert creates a group -> ko, no access rights
with self.assertRaises(AccessError):
self.mail_group.create(cr, user_noone_id, {'name': 'Test'})
# Do: Raoul creates a restricted group -> ok
new_group_id = self.mail_group.create(cr, user_employee_id, {'name': 'Test'})
# Do: Bert added in followers, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_noone_id])
self.mail_group.read(cr, user_noone_id, [new_group_id])
# Do: Raoul reads Priv -> ko, private
with self.assertRaises(except_orm):
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
# Do: Raoul added in follower, read -> ok, in followers
self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_employee_id])
self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
        # Do: Raoul writes on Priv -> ok, he is now in the followers
self.mail_group.write(cr, user_employee_id, [self.group_priv_id], {'name': 'modified'})
# Do: Bert cannot write on Private -> ko (read but no write)
with self.assertRaises(AccessError):
self.mail_group.write(cr, user_noone_id, [self.group_priv_id], {'name': 're-modified'})
# Test: Bert cannot unlink the group
with self.assertRaises(except_orm):
self.mail_group.unlink(cr, user_noone_id, [self.group_priv_id])
# Do: Raoul unlinks the group, there are no followers and messages left
self.mail_group.unlink(cr, user_employee_id, [self.group_priv_id])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
        self.assertFalse(msg_ids, 'unlinked document should not have any messages left')
| agpl-3.0 | -5,811,167,959,311,190,000 | 54.055556 | 126 | 0.635974 | false |
meetsandeepan/meetsandeepan.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/token.py | 365 | 5662 | # -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
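    # Attribute access below creates token subtypes on demand and caches them,
    # so e.g. Token.Literal.String always resolves to the same _TokenType.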
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility; use ``ttype in other`` instead.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
    >>> string_to_tokentype('String.Double')
    Token.Literal.String.Double
    >>> string_to_tokentype('Token.Literal.Number')
    Token.Literal.Number
    >>> string_to_tokentype('')
    Token
    Tokens that are already tokens are returned unchanged:
    >>> string_to_tokentype(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
| mit | -6,306,570,437,643,109,000 | 28.035897 | 70 | 0.459908 | false |
sameetb-cuelogic/edx-platform-test | lms/djangoapps/certificates/migrations/0006_auto__chg_field_generatedcertificate_certificate_id.py | 188 | 7408 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'GeneratedCertificate.certificate_id'
db.alter_column('certificates_generatedcertificate', 'certificate_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
def backwards(self, orm):
# Changing field 'GeneratedCertificate.certificate_id'
db.alter_column('certificates_generatedcertificate', 'certificate_id', self.gf('django.db.models.fields.CharField')(default=None, max_length=32))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grade': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'graded_certificate_id': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '32', 'null': 'True'}),
'graded_download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 | 5,627,653,851,054,084,000 | 76.166667 | 182 | 0.568305 | false |
pymedusa/SickRage | ext/guessit/rules/properties/video_codec.py | 4 | 4959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
video_codec, video_profile, color_depth and video_api properties
"""
from rebulk import Rebulk, Rule, RemoveMatch
from rebulk.remodule import re
from ..common import dash
from ..common.pattern import is_disabled
from ..common.validators import seps_after, seps_before, seps_surround
def video_codec(config): # pylint:disable=unused-argument
"""
Builder for rebulk object.
:param config: rule configuration
:type config: dict
:return: Created Rebulk object
:rtype: Rebulk
"""
rebulk = Rebulk()
rebulk = rebulk.regex_defaults(flags=re.IGNORECASE, abbreviations=[dash]).string_defaults(ignore_case=True)
rebulk.defaults(name="video_codec",
tags=['source-suffix', 'streaming_service.suffix'],
disabled=lambda context: is_disabled(context, 'video_codec'))
rebulk.regex(r'Rv\d{2}', value='RealVideo')
rebulk.regex('Mpe?g-?2', '[hx]-?262', value='MPEG-2')
rebulk.string("DVDivX", "DivX", value="DivX")
rebulk.string('XviD', value='Xvid')
rebulk.regex('VC-?1', value='VC-1')
rebulk.string('VP7', value='VP7')
rebulk.string('VP8', 'VP80', value='VP8')
rebulk.string('VP9', value='VP9')
rebulk.regex('[hx]-?263', value='H.263')
rebulk.regex('[hx]-?264', '(MPEG-?4)?AVC(?:HD)?', value='H.264')
rebulk.regex('[hx]-?265', 'HEVC', value='H.265')
rebulk.regex('(?P<video_codec>hevc)(?P<color_depth>10)', value={'video_codec': 'H.265', 'color_depth': '10-bit'},
tags=['video-codec-suffix'], children=True)
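    # the pattern above emits two child matches: H.265 plus a 10-bit color depth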
# http://blog.mediacoderhq.com/h264-profiles-and-levels/
# https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC
rebulk.defaults(clear=True,
name="video_profile",
validator=seps_surround,
disabled=lambda context: is_disabled(context, 'video_profile'))
rebulk.string('BP', value='Baseline', tags='video_profile.rule')
rebulk.string('XP', 'EP', value='Extended', tags='video_profile.rule')
rebulk.string('MP', value='Main', tags='video_profile.rule')
rebulk.string('HP', 'HiP', value='High', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/Scalable_Video_Coding
rebulk.string('SC', 'SVC', value='Scalable Video Coding', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/AVCHD
rebulk.regex('AVC(?:HD)?', value='Advanced Video Codec High Definition', tags='video_profile.rule')
# https://en.wikipedia.org/wiki/H.265/HEVC
rebulk.string('HEVC', value='High Efficiency Video Coding', tags='video_profile.rule')
rebulk.regex('Hi422P', value='High 4:2:2')
rebulk.regex('Hi444PP', value='High 4:4:4 Predictive')
rebulk.regex('Hi10P?', value='High 10') # no profile validation is required
rebulk.string('DXVA', value='DXVA', name='video_api',
disabled=lambda context: is_disabled(context, 'video_api'))
rebulk.defaults(clear=True,
name='color_depth',
validator=seps_surround,
disabled=lambda context: is_disabled(context, 'color_depth'))
rebulk.regex('12.?bits?', value='12-bit')
rebulk.regex('10.?bits?', 'YUV420P10', 'Hi10P?', value='10-bit')
rebulk.regex('8.?bits?', value='8-bit')
rebulk.rules(ValidateVideoCodec, VideoProfileRule)
return rebulk
class ValidateVideoCodec(Rule):
"""
    Validate video_codec matches: they must be separated from their neighbours
    or be adjacent to a match tagged as a video-codec prefix/suffix.
"""
priority = 64
consequence = RemoveMatch
def enabled(self, context):
return not is_disabled(context, 'video_codec')
def when(self, matches, context):
ret = []
for codec in matches.named('video_codec'):
if not seps_before(codec) and \
not matches.at_index(codec.start - 1, lambda match: 'video-codec-prefix' in match.tags):
ret.append(codec)
continue
if not seps_after(codec) and \
not matches.at_index(codec.end + 1, lambda match: 'video-codec-suffix' in match.tags):
ret.append(codec)
continue
return ret
class VideoProfileRule(Rule):
"""
Rule to validate video_profile
"""
consequence = RemoveMatch
def enabled(self, context):
return not is_disabled(context, 'video_profile')
def when(self, matches, context):
profile_list = matches.named('video_profile', lambda match: 'video_profile.rule' in match.tags)
ret = []
for profile in profile_list:
codec = matches.at_span(profile.span, lambda match: match.name == 'video_codec', 0)
if not codec:
codec = matches.previous(profile, lambda match: match.name == 'video_codec')
if not codec:
codec = matches.next(profile, lambda match: match.name == 'video_codec')
if not codec:
ret.append(profile)
return ret
| gpl-3.0 | 8,392,812,509,139,827,000 | 38.357143 | 117 | 0.618471 | false |
ZhangXinNan/tensorflow | tensorflow/python/saved_model/loader_impl.py | 5 | 14273 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader implementation for SavedModel with hermetic, language-neutral exports.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
def _parse_saved_model(export_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
export_dir: Directory containing the SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
try:
file_content = file_io.FileIO(path_to_pb, "rb").read()
saved_model.ParseFromString(file_content)
return saved_model
except message.DecodeError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pb, str(e)))
elif file_io.file_exists(path_to_pbtxt):
try:
file_content = file_io.FileIO(path_to_pbtxt, "rb").read()
text_format.Merge(file_content.decode("utf-8"), saved_model)
return saved_model
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (path_to_pbtxt, str(e)))
else:
raise IOError("SavedModel file does not exist at: %s/{%s|%s}" %
(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT,
constants.SAVED_MODEL_FILENAME_PB))
def _get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):
"""Gets the asset tensors, if defined in the meta graph def to load.
Args:
export_dir: Directory where the SavedModel is located.
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
import_scope: Optional `string` -- if specified, prepend this followed by
'/' to all returned asset tensor names.
Returns:
A dictionary of asset tensors, keyed by the name of the asset tensor. The
value in the map corresponds to the absolute path of the asset file.
"""
# Collection-def that may contain the assets key.
collection_def = meta_graph_def_to_load.collection_def
asset_tensor_dict = {}
if constants.ASSETS_KEY in collection_def:
# Location of the assets for SavedModel.
assets_directory = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY))
assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value
# Process each asset and add it to the asset tensor dictionary.
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
tensor_name = asset_proto.tensor_info.name
if import_scope:
tensor_name = "%s/%s" % (import_scope, tensor_name)
asset_tensor_dict[tensor_name] = os.path.join(
compat.as_bytes(assets_directory),
compat.as_bytes(asset_proto.filename))
return asset_tensor_dict
def _get_main_op_tensor(
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
collection_def = meta_graph_def_to_load.collection_def
main_op_tensor = None
if init_op_key in collection_def:
main_ops = collection_def[init_op_key].node_list.value
if len(main_ops) != 1:
raise RuntimeError("Expected exactly one SavedModel main op. "
"Found: {}".format(main_ops))
main_op_tensor = ops.get_collection(init_op_key)[0]
return main_op_tensor
@tf_export("saved_model.loader.maybe_saved_model_directory")
def maybe_saved_model_directory(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
txt_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
pb_path = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
return file_io.file_exists(txt_path) or file_io.file_exists(pb_path)
@tf_export("saved_model.loader.load")
def load(sess, tags, export_dir, import_scope=None, **saver_kwargs):
"""Loads the model from a SavedModel as specified by tags.
Args:
sess: The TensorFlow session to restore the variables.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
export_dir: Directory in which the SavedModel protocol buffer and variables
to be loaded are located.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: Optional keyword arguments passed through to Saver.
Returns:
The `MetaGraphDef` protocol buffer loaded in the provided session. This
can be used to further extract signature-defs, collection-defs, etc.
Raises:
RuntimeError: MetaGraphDef associated with the tags cannot be found.
"""
loader = SavedModelLoader(export_dir)
return loader.load(sess, tags, import_scope, **saver_kwargs)
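# A minimal usage sketch (the export path and tag set below are assumptions):
#
#   with tf.Session(graph=tf.Graph()) as sess:
#     meta_graph_def = load(sess, ["serve"], "/tmp/exported_model")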
class SavedModelLoader(object):
"""Load graphs and restore variable values from a `SavedModel`."""
def __init__(self, export_dir):
"""Creates a `SavedModelLoader`.
Args:
export_dir: Directory in which the SavedModel protocol buffer and
variables to be loaded are located.
"""
self._export_dir = export_dir
self._variables_path = saved_model_utils.get_variables_path(export_dir)
self._saved_model = _parse_saved_model(export_dir)
@property
def export_dir(self):
"""Directory containing the SavedModel."""
return self._export_dir
@property
def variables_path(self):
"""Path to variable checkpoint files."""
return self._variables_path
@property
def saved_model(self):
"""SavedModel object parsed from the export directory."""
return self._saved_model
def get_meta_graph_def_from_tags(self, tags):
"""Return MetaGraphDef with the exact specified tags.
Args:
tags: A list or set of string tags that identify the MetaGraphDef.
Returns:
MetaGraphDef with the same tags.
Raises:
RuntimeError: if no metagraphs were found with the associated tags.
"""
found_match = False
for meta_graph_def in self._saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set(tags):
meta_graph_def_to_load = meta_graph_def
found_match = True
break
if not found_match:
raise RuntimeError(
"MetaGraphDef associated with tags " + str(tags).strip("[]") +
" could not be found in SavedModel. To inspect available tag-sets in"
" the SavedModel, please use the SavedModel CLI: `saved_model_cli`"
)
return meta_graph_def_to_load
def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):
"""Load ops and nodes from SavedModel MetaGraph into graph.
Args:
graph: tf.Graph object.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
A tuple of
* Saver defined by the MetaGraph, which can be used to restore the
variable values.
* List of `Operation`/`Tensor` objects returned from
`tf.import_graph_def` (may be `None`).
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with graph.as_default():
return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
meta_graph_def, import_scope=import_scope, **saver_kwargs)
def restore_variables(self, sess, saver, import_scope=None):
"""Restore SavedModel variable values into the session.
Args:
sess: tf.Session to restore variable values.
saver: a tf.train.Saver object. Can be None if there are no variables in
graph. This may be the saver returned by the load_graph() function, or a
default `tf.train.Saver()`.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
Raises:
ValueError: if no saver was passed to the saver argument, and there are
variables in the graph.
"""
with sess.graph.as_default():
if (saver is None and
not variables._all_saveable_objects(scope=import_scope)): # pylint: disable=protected-access
tf_logging.info("The specified SavedModel has no variables; no "
"checkpoints were restored.")
elif isinstance(saver, tf_saver.Saver):
saver.restore(sess, self._variables_path)
else:
raise ValueError(
"No tf.train.Saver object was passed to the function "
"SavedModelLoader.restore_variables. Since there are variables in "
"the graph, a saver is required.")
def run_init_ops(self, sess, tags, import_scope=None):
"""Run initialization ops defined in the `MetaGraphDef`.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with sess.graph.as_default():
# Get asset tensors, if any.
asset_tensors_dictionary = _get_asset_tensors(
self._export_dir, meta_graph_def, import_scope=import_scope)
main_op_tensor = (
_get_main_op_tensor(meta_graph_def, constants.MAIN_OP_KEY) or
_get_main_op_tensor(meta_graph_def, constants.LEGACY_INIT_OP_KEY))
if main_op_tensor is not None:
sess.run(fetches=[main_op_tensor], feed_dict=asset_tensors_dictionary)
def load(self, sess, tags, import_scope=None, **saver_kwargs):
"""Load the MetaGraphDef graph and restore variable values into the session.
Args:
sess: tf.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
`MetagraphDef` proto of the graph that was loaded.
"""
with sess.graph.as_default():
saver, _ = self.load_graph(sess.graph, tags, import_scope,
**saver_kwargs)
self.restore_variables(sess, saver, import_scope)
self.run_init_ops(sess, tags, import_scope)
return self.get_meta_graph_def_from_tags(tags)
| apache-2.0 | -2,493,810,611,091,114,500 | 39.896848 | 103 | 0.690955 | false |
django-wodnas/django-tinymce | tinymce/widgets.py | 6 | 4860 | # Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
"""
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
from django import forms
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.core.urlresolvers import reverse
from django.forms.widgets import flatatt
try:
from django.utils.encoding import smart_unicode
except ImportError:
from django.forms.util import smart_unicode
from django.utils.html import escape
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
import tinymce.settings
class TinyMCE(forms.Textarea):
"""
TinyMCE widget. Set settings.TINYMCE_JS_URL to set the location of the
javascript file. Default is "MEDIA_URL + 'js/tiny_mce/tiny_mce.js'".
You can customize the configuration with the mce_attrs argument to the
constructor.
In addition to the standard configuration you can set the
'content_language' parameter. It takes the value of the 'language'
parameter by default.
In addition to the default settings from settings.TINYMCE_DEFAULT_CONFIG,
this widget sets the 'language', 'directionality' and
'spellchecker_languages' parameters by default. The first is derived from
the current Django language, the others from the 'content_language'
parameter.
"""
def __init__(self, content_language=None, attrs=None, mce_attrs=None):
super(TinyMCE, self).__init__(attrs)
if mce_attrs is None:
mce_attrs = {}
self.mce_attrs = mce_attrs
if content_language is None:
content_language = mce_attrs.get('language', None)
self.content_language = content_language
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = tinymce.settings.DEFAULT_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
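        # widget-level mce_attrs override the site-wide TINYMCE_DEFAULT_CONFIG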
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
mce_json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
compressor_json = simplejson.dumps(compressor_config)
html.append(u'<script type="text/javascript">tinyMCE_GZ.init(%s)</script>' % compressor_json)
html.append(u'<script type="text/javascript">tinyMCE.init(%s)</script>' % mce_json)
return mark_safe(u'\n'.join(html))
def _media(self):
if tinymce.settings.USE_COMPRESSOR:
js = [reverse('tinymce-compressor')]
else:
js = [tinymce.settings.JS_URL]
if tinymce.settings.USE_FILEBROWSER:
js.append(reverse('tinymce-filebrowser'))
return forms.Media(js=js)
media = property(_media)
class AdminTinyMCE(admin_widgets.AdminTextareaWidget, TinyMCE):
pass
def get_language_config(content_language=None):
language = get_language()[:2]
if content_language:
content_language = content_language[:2]
else:
content_language = language
config = {}
config['language'] = language
lang_names = SortedDict()
for lang, name in settings.LANGUAGES:
if lang[:2] not in lang_names: lang_names[lang[:2]] = []
lang_names[lang[:2]].append(_(name))
sp_langs = []
for lang, names in lang_names.items():
if lang == content_language:
default = '+'
else:
default = ''
sp_langs.append(u'%s%s=%s' % (default, ' / '.join(names), lang))
config['spellchecker_languages'] = ','.join(sp_langs)
if content_language in settings.LANGUAGES_BIDI:
config['directionality'] = 'rtl'
else:
config['directionality'] = 'ltr'
if tinymce.settings.USE_SPELLCHECKER:
config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')
return config
| mit | -6,911,016,297,554,156,000 | 35.818182 | 105 | 0.653909 | false |
hoh/Billabong | billabong/check.py | 1 | 2931 | # Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Billabong is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Check the integrity of the data."""
import logging
from base64 import b64decode
from .encryption import hashing, decrypt_blob
from .utils import read_in_chunks
from .exceptions import CheckError
from .settings import inventory, stores
def compute_hash(file_object, chunk_size=1024):
"""Return the hash of a file object.
Compute the hash of the content of a file object using
the given hashing function, by reading it chunk by chunk.
"""
file_hash = hashing()
for chunk in read_in_chunks(file_object, chunk_size):
file_hash.update(chunk)
return file_hash
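# e.g. (hypothetical path):
#   with open('/tmp/blob', 'rb') as blob_file:
#       digest = compute_hash(blob_file).hexdigest()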
def check_data(id_=None, record=None, raises=False):
"""Check the integrity of the data for a record id or record."""
if id_ and not record:
record = inventory.get_record(id_)
elif record and not id_:
id_ = record['id']
else:
raise ValueError("Missing value for 'id' or 'meta'.")
blob_id = record['blob']
check_enc_data(blob_id, raises)
key = b64decode(record['key'])
hash_ = record['hash']
    check_clear_data(blob_id, key, hash_, raises)
def check_enc_data(blob_id, raises=False):
"""Check the validity of an encrypted blob."""
enc_path = stores[0]._blob_path(blob_id)
with open(enc_path, 'rb') as enc_file:
enc_hash = compute_hash(enc_file)
if blob_id == enc_hash.hexdigest():
return True
else:
reason = ("Encrypted data does not match the hash for id '{}'"
.format(blob_id))
if raises:
raise CheckError(reason)
else:
logging.error(reason)
return False
def check_clear_data(id_, key, hash_, raises=False):
"""Check the validity of the clear data inside a blob."""
clear_data = decrypt_blob(stores[0], id_, key)
clear_hash = hashing()
for chunk in clear_data:
clear_hash.update(chunk)
if hash_ == "sha256-" + clear_hash.hexdigest():
return True
else:
reason = ("Clear data does not match the hash for id '{}'"
.format(id_))
if raises:
raise CheckError(reason)
else:
logging.error(reason)
return False
| agpl-3.0 | 985,511,317,169,826,000 | 30.180851 | 74 | 0.654043 | false |
valkjsaaa/sl4a | python/xmpppy/doc/examples/commandsbot.py | 87 | 7937 | #!/usr/bin/python
""" The example of using xmpppy's Ad-Hoc Commands (JEP-0050) implementation.
"""
import xmpp
from xmpp.protocol import *
options = {
'JID': '[email protected]',
'Password': '********',
}
class TestCommand(xmpp.commands.Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works. This one
actually does some calculations."""
name = 'testcommand'
description = 'Circle calculations'
def __init__(self, jid=''):
""" Initialize some internals. Set the first request handler to self.calcTypeForm.
"""
xmpp.commands.Command_Handler_Prototype.__init__(self,jid)
self.initial = {
'execute': self.initialForm
}
def initialForm(self, conn, request):
""" Assign a session id and send the first form. """
sessionid = self.getSessionID()
self.sessions[sessionid] = {
'jid':request.getFrom(),
'data':{'type':None}
}
# simulate that the client sent sessionid, so calcTypeForm will be able
# to continue
request.getTag(name="command").setAttr('sessionid', sessionid)
return self.calcTypeForm(conn, request)
def calcTypeForm(self, conn, request):
""" Send first form to the requesting user. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
        # What to do when a user sends us a response? Note that we should always
        # include 'execute', as it is the default action taken when the requester
        # does not specify an exact action (set it to 'next' or 'complete')
session['actions'] = {
'cancel': self.cancel,
'next': self.calcTypeFormAccept,
'execute': self.calcTypeFormAccept,
}
# The form to send
calctypefield = xmpp.DataField(
name='calctype',
desc='Calculation Type',
value=session['data']['type'],
options=[
['Calculate the diameter of a circle','circlediameter'],
['Calculate the area of a circle','circlearea']
],
typ='list-single',
required=1)
        # Set the label attribute manually; xmpppy's DataField does not expose it
calctypefield.setAttr('label', 'Calculation Type')
form = xmpp.DataForm(
title='Select type of operation',
data=[
'Use the combobox to select the type of calculation you would like'\
'to do, then click Next.',
calctypefield])
# Build a reply with the form
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'next'},
payload=[xmpp.Node('next')]),
form]
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'executing'},
payload=replypayload)
self._owner.send(reply) # Question: self._owner or conn?
raise xmpp.NodeProcessed
def calcTypeFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with
the second form. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data
session['data']['type'] = form.getField('calctype').getValue()
# send second form
return self.calcDataForm(conn, request)
def calcDataForm(self, conn, request, notavalue=None):
""" Send a form asking for diameter. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# set the actions taken on requester's response
session['actions'] = {
'cancel': self.cancel,
'prev': self.calcTypeForm,
'next': self.calcDataFormAccept,
'execute': self.calcDataFormAccept
}
# create a form
radiusfield = xmpp.DataField(desc='Radius',name='radius',typ='text-single')
radiusfield.setAttr('label', 'Radius')
form = xmpp.DataForm(
title = 'Enter the radius',
data=[
'Enter the radius of the circle (numbers only)',
radiusfield])
# build a reply stanza
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'complete'},
payload=[xmpp.Node('complete'),xmpp.Node('prev')]),
form]
if notavalue:
replypayload.append(xmpp.Node('note',
attrs={'type': 'warn'},
payload=['You have to enter valid number.']))
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':request.getTagAttr('command','sessionid'),
'status':'executing'},
payload=replypayload)
self._owner.send(reply)
raise xmpp.NodeProcessed
def calcDataFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with the result. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data; if the entered value is not a number, return to second stage
try:
value = float(form.getField('radius').getValue())
except:
self.calcDataForm(conn, request, notavalue=True)
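            # calcDataForm ends by raising xmpp.NodeProcessed, so control
            # never reaches the calculation below with an invalid value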
# calculate the answer
from math import pi
if session['data']['type'] == 'circlearea':
result = (value**2) * pi
else:
result = 2 * value * pi
# build the result form
form = xmpp.DataForm(
typ='result',
data=[xmpp.DataField(desc='result', name='result', value=result)])
# build the reply stanza
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'completed'},
payload=[form])
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
def cancel(self, conn, request):
""" Requester canceled the session, send a short reply. """
# get the session id
sessionid = request.getTagAttr('command','sessionid')
# send the reply
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'cancelled'})
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
class ConnectionError: pass
class AuthorizationError: pass
class NotImplemented: pass
class Bot:
""" The main bot class. """
def __init__(self, JID, Password):
""" Create a new bot. Connect to the server and log in. """
# connect...
jid = xmpp.JID(JID)
self.connection = xmpp.Client(jid.getDomain(), debug=['always', 'browser', 'testcommand'])
result = self.connection.connect()
if result is None:
raise ConnectionError
# authorize
result = self.connection.auth(jid.getNode(), Password)
if result is None:
raise AuthorizationError
# plugins
# disco - needed by commands
# warning: case of "plugin" method names are important!
# to attach a command to Commands class, use .plugin()
# to attach anything to Client class, use .PlugIn()
self.disco = xmpp.browser.Browser()
self.disco.PlugIn(self.connection)
self.disco.setDiscoHandler({
'info': {
'ids': [{
'category': 'client',
'type': 'pc',
'name': 'Bot'
}],
'features': [NS_DISCO_INFO],
}
})
self.commands = xmpp.commands.Commands(self.disco)
self.commands.PlugIn(self.connection)
self.command_test = TestCommand()
self.command_test.plugin(self.commands)
# presence
self.connection.sendInitPresence(requestRoster=0)
def loop(self):
""" Do nothing except handling new xmpp stanzas. """
try:
while self.connection.Process(1):
pass
except KeyboardInterrupt:
pass
bot = Bot(**options)
bot.loop()
| apache-2.0 | 3,100,345,150,622,980,600 | 26.463668 | 92 | 0.681114 | false |
mat12/mytest | lib/python/Components/ChoiceList.py | 7 | 2176 | from MenuList import MenuList
from Tools.Directories import SCOPE_ACTIVE_SKIN, resolveFilename
from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, gFont, getDesktop
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import fileExists
import skin
def ChoiceEntryComponent(key="", text=None):
screenwidth = getDesktop(0).size().width()
if not text: text = ["--"]
res = [ text ]
if text[0] == "--":
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 0, 00, 900, 45, 0, RT_HALIGN_LEFT, "-"*200))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 0, 00, 800, 25, 0, RT_HALIGN_LEFT, "-"*200))
else:
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 100, 7, 900, 45, 0, RT_HALIGN_LEFT, text[0]))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 45, 00, 800, 25, 0, RT_HALIGN_LEFT, text[0]))
if key:
if key == "expandable":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expandable.png")
elif key == "expanded":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expanded.png")
elif key == "verticalline":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/verticalline.png")
else:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % key)
if fileExists(pngfile):
png = LoadPixmap(pngfile)
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 10, 5, 63, 48, png))
else:
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 5, 0, 35, 25, png))
return res
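# Illustrative usage only (the entry texts and surrounding screen are
# hypothetical): each entry pairs a colour key or expander icon with the
# text tuple it displays, e.g.
#
#   entries = [ChoiceEntryComponent("red", (_("Delete"), "delete")),
#              ChoiceEntryComponent("expandable", (_("Plugins"), "plugins"))]
#   self["list"] = ChoiceList(list=entries)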
class ChoiceList(MenuList):
def __init__(self, list, selection = 0, enableWrapAround=False):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
font = skin.fonts["ChoiceList"]
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.ItemHeight = font[2]
self.selection = selection
def postWidgetCreate(self, instance):
MenuList.postWidgetCreate(self, instance)
self.moveToIndex(self.selection)
self.instance.setWrapAround(True)
def getItemHeight(self):
return self.ItemHeight
| gpl-2.0 | -2,123,228,725,338,566,100 | 38.563636 | 98 | 0.721967 | false |
Gheehnest/three.js | utils/exporters/blender/addons/io_three/exporter/api/material.py | 55 | 8333 | from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Material):
material = name
else:
material = data.materials[name]
return func(material, *args, **kwargs)
return inner
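# A small usage sketch (the material name is hypothetical): because of the
# decorator above, both call styles below are expected to resolve to the
# same material datablock.
#
#   diffuse_color('Material.001')                  # looked up in bpy.data.materials
#   diffuse_color(data.materials['Material.001'])  # instance passed straight through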
@_material
def ambient_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.ambient_color(%s)", material)
diffuse = diffuse_color(material)
return (material.ambient * diffuse[0],
material.ambient * diffuse[1],
material.ambient * diffuse[2])
@_material
def blending(material):
"""
:param material:
:return: THREE_blending_type value
"""
logger.debug("material.blending(%s)", material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug("No THREE_blending_type attribute found")
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
"""
:param material:
:return: texture node for bump
"""
logger.debug("material.bump_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
"""
:param material:
:rtype: float
"""
return normal_scale(material)
@_material
def depth_test(material):
"""
:param material:
:return: THREE_depth_test value
:rtype: bool
"""
logger.debug("material.depth_test(%s)", material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug("No THREE_depth_test attribute found")
test = True
return test
@_material
def depth_write(material):
"""
:param material:
:return: THREE_depth_write value
:rtype: bool
"""
logger.debug("material.depth_write(%s)", material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug("No THREE_depth_write attribute found")
write = True
return write
@_material
def double_sided(material):
"""
:param material:
:return: THREE_double_sided value
:rtype: bool
"""
logger.debug("material.double_sided(%s)", material)
try:
write = material.THREE_double_sided
except AttributeError:
logger.debug("No THREE_double_sided attribute found")
write = False
return write
@_material
def diffuse_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.diffuse_color(%s)", material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
"""
:param material:
:return: texture node for map
"""
logger.debug("material.diffuse_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.emissive_color(%s)", material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
"""
:param material:
:return: texture node for light maps
"""
logger.debug("material.light_map(%s)", material)
for texture in _valid_textures(material, strict_use=False):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.normal_scale(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return texture.normal_factor
@_material
def normal_map(material):
"""
:param material:
:return: texture node for normals
"""
logger.debug("material.normal_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.opacity(%s)", material)
return round(material.alpha, 2)
@_material
def shading(material):
"""
:param material:
:return: shading type (phong or lambert)
"""
logger.debug("material.shading(%s)", material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.specular_coef(%s)", material)
return material.specular_hardness
@_material
def specular_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.specular_color(%s)", material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
"""
:param material:
:return: texture node for specular
"""
logger.debug("material.specular_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.transparent(%s)", material)
return material.use_transparency
@_material
def type(material):
"""
:param material:
:return: THREE compatible shader type
"""
logger.debug("material.type(%s)", material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.use_vertex_colors(%s)", material)
return material.use_vertex_color_paint
def used_materials():
"""
:return: list of materials that are in use
:rtype: generator
"""
logger.debug("material.used_materials()")
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
"""
:param material:
:return: THREE_visible value
:rtype: bool
"""
logger.debug("material.visible(%s)", material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug("No THREE_visible attribute found")
vis = True
return vis
@_material
def wireframe(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.wireframe(%s)", material)
return material.type == WIRE
def _valid_textures(material, strict_use=True):
"""
:param material:
:rtype: generator
"""
for texture in material.texture_slots:
if not texture:
continue
if strict_use:
in_use = texture.use
else:
in_use = True
if not in_use:
continue
if not texture.texture or texture.texture.type != IMAGE:
logger.warning("Unable to export non-image texture %s", texture)
continue
logger.debug("Valid texture found %s", texture)
yield texture
| mit | -8,537,457,587,146,690,000 | 19.225728 | 76 | 0.614905 | false |
ferrants/ansible | test/units/parsing/test_splitter.py | 204 | 4425 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import split_args, parse_kv
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestSplitter_Gen:
SPLIT_DATA = (
(u'a',
[u'a'],
{u'_raw_params': u'a'}),
(u'a=b',
[u'a=b'],
{u'a': u'b'}),
(u'a="foo bar"',
[u'a="foo bar"'],
{u'a': u'foo bar'}),
(u'"foo bar baz"',
[u'"foo bar baz"'],
{u'_raw_params': '"foo bar baz"'}),
(u'foo bar baz',
[u'foo', u'bar', u'baz'],
{u'_raw_params': u'foo bar baz'}),
(u'a=b c="foo bar"',
[u'a=b', u'c="foo bar"'],
{u'a': u'b', u'c': u'foo bar'}),
(u'a="echo \\"hello world\\"" b=bar',
[u'a="echo \\"hello world\\""', u'b=bar'],
{u'a': u'echo "hello world"', u'b': u'bar'}),
(u'a="multi\nline"',
[u'a="multi\nline"'],
{u'a': u'multi\nline'}),
(u'a="blank\n\nline"',
[u'a="blank\n\nline"'],
{u'a': u'blank\n\nline'}),
(u'a="blank\n\n\nlines"',
[u'a="blank\n\n\nlines"'],
{u'a': u'blank\n\n\nlines'}),
(u'a="a long\nmessage\\\nabout a thing\n"',
[u'a="a long\nmessage\\\nabout a thing\n"'],
{u'a': u'a long\nmessage\\\nabout a thing\n'}),
(u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
[u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
{u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
(u'a={{jinja}}',
[u'a={{jinja}}'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}',
[u'a={{ jinja }}'],
{u'a': u'{{ jinja }}'}),
(u'a="{{jinja}}"',
[u'a="{{jinja}}"'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}{{jinja2}}',
[u'a={{ jinja }}{{jinja2}}'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a="{{ jinja }}{{jinja2}}"',
[u'a="{{ jinja }}{{jinja2}}"'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a={{jinja}} b={{jinja2}}',
[u'a={{jinja}}', u'b={{jinja2}}'],
{u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
(u'a="{{jinja}}\n" b="{{jinja2}}\n"',
[u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
{u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
(u'a="café eñyei"',
[u'a="café eñyei"'],
{u'a': u'café eñyei'}),
(u'a=café b=eñyei',
[u'a=café', u'b=eñyei'],
{u'a': u'café', u'b': u'eñyei'}),
)
def check_split_args(self, args, expected):
tools.eq_(split_args(args), expected)
def test_split_args(self):
for datapoint in self.SPLIT_DATA:
yield self.check_split_args, datapoint[0], datapoint[1]
def check_parse_kv(self, args, expected):
tools.eq_(parse_kv(args), expected)
def test_parse_kv(self):
for datapoint in self.SPLIT_DATA:
            yield self.check_parse_kv, datapoint[0], datapoint[2]
| gpl-3.0 | 5,759,693,348,333,707,000 | 38.401786 | 85 | 0.468842 | false |
admire93/youtube-dl | youtube_dl/extractor/sharesix.py | 128 | 2692 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
parse_duration,
)
class ShareSixIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
_TESTS = [
{
'url': 'http://sharesix.com/f/OXjQ7Y6',
'md5': '9e8e95d8823942815a7d7c773110cc93',
'info_dict': {
'id': 'OXjQ7Y6',
'ext': 'mp4',
'title': 'big_buck_bunny_480p_surround-fix.avi',
'duration': 596,
'width': 854,
'height': 480,
},
},
{
'url': 'http://sharesix.com/lfrwoxp35zdd',
'md5': 'dd19f1435b7cec2d7912c64beeee8185',
'info_dict': {
'id': 'lfrwoxp35zdd',
'ext': 'flv',
'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
'duration': 65,
'width': 1280,
'height': 720,
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
fields = {
'method_free': 'Free'
}
post = compat_urllib_parse.urlencode(fields)
req = compat_urllib_request.Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id,
'Downloading video page')
video_url = self._search_regex(
r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
title = self._html_search_regex(
r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
duration = parse_duration(
self._search_regex(
r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
webpage,
'duration',
fatal=False
)
)
m = re.search(
r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
<dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
webpage
)
width = height = None
if m:
width, height = int(m.group('width')), int(m.group('height'))
formats = [{
'format_id': 'sd',
'url': video_url,
'width': width,
'height': height,
}]
return {
'id': video_id,
'title': title,
'duration': duration,
'formats': formats,
}
| unlicense | 5,374,723,151,778,540,000 | 27.946237 | 80 | 0.447994 | false |
elsonrodriguez/madhatter | cobbler/modules/sync_post_restart_services.py | 6 | 2421 | import distutils.sysconfig
import sys
import os
import traceback
import cexceptions
import xmlrpclib
import cobbler.module_loader as module_loader
import cobbler.utils as utils
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
def register():
# this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.
# the return of this method indicates the trigger type
return "/var/lib/cobbler/triggers/sync/post/*"
def run(api,args,logger):
settings = api.settings()
manage_dhcp = str(settings.manage_dhcp).lower()
manage_dns = str(settings.manage_dns).lower()
restart_dhcp = str(settings.restart_dhcp).lower()
restart_dns = str(settings.restart_dns).lower()
which_dhcp_module = module_loader.get_module_from_file("dhcp","module",just_name=True).strip()
which_dns_module = module_loader.get_module_from_file("dns","module",just_name=True).strip()
# special handling as we don't want to restart it twice
has_restarted_dnsmasq = False
rc = 0
if manage_dhcp != "0":
if which_dhcp_module == "manage_isc":
if restart_dhcp != "0":
rc = utils.subprocess_call(logger, "dhcpd -t -q", shell=True)
if rc != 0:
logger.error("dhcpd -t failed")
return 1
rc = utils.subprocess_call(logger,"/etc/rc.d/init.d/dhcpd restart", shell=True)
elif which_dhcp_module == "manage_dnsmasq":
if restart_dhcp != "0":
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/dnsmasq restart")
has_restarted_dnsmasq = True
else:
logger.error("unknown DHCP engine: %s" % which_dhcp_module)
rc = 411
if manage_dns != "0" and restart_dns != "0":
if which_dns_module == "manage_bind":
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/named restart", shell=True)
elif which_dns_module == "manage_dnsmasq" and not has_restarted_dnsmasq:
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/dnsmasq restart", shell=True)
elif which_dns_module == "manage_dnsmasq" and has_restarted_dnsmasq:
rc = 0
else:
logger.error("unknown DNS engine: %s" % which_dns_module)
rc = 412
return rc
| gpl-2.0 | -6,551,306,974,139,904,000 | 36.246154 | 98 | 0.619992 | false |
caphrim007/ansible | lib/ansible/module_utils/network/f5/bigiq.py | 15 | 6723 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import time
try:
from f5.bigiq import ManagementRoot
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from library.module_utils.network.f5.common import F5BaseClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import is_ansible_debug
from library.module_utils.network.f5.icontrol import iControlRestSession
except ImportError:
from ansible.module_utils.network.f5.common import F5BaseClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import is_ansible_debug
from ansible.module_utils.network.f5.icontrol import iControlRestSession
class F5Client(F5BaseClient):
def __init__(self, *args, **kwargs):
super(F5Client, self).__init__(*args, **kwargs)
self.provider = self.merge_provider_params()
@property
def api(self):
exc = None
if self._client:
return self._client
for x in range(0, 10):
try:
result = ManagementRoot(
self.provider['server'],
self.provider['user'],
self.provider['password'],
port=self.provider['server_port'],
verify=self.provider['validate_certs']
)
self._client = result
return self._client
except Exception as ex:
exc = ex
time.sleep(1)
error = 'Unable to connect to {0} on port {1}.'.format(
self.provider['server'], self.provider['server_port']
)
if exc is not None:
error += ' The reported error was "{0}".'.format(str(exc))
raise F5ModuleError(error)
class F5RestClient(F5BaseClient):
def __init__(self, *args, **kwargs):
super(F5RestClient, self).__init__(*args, **kwargs)
self.provider = self.merge_provider_params()
@property
def api(self):
exc = None
if self._client:
return self._client
for x in range(0, 10):
try:
provider = self.provider['auth_provider'] or 'local'
url = "https://{0}:{1}/mgmt/shared/authn/login".format(
self.provider['server'], self.provider['server_port']
)
payload = {
'username': self.provider['user'],
'password': self.provider['password'],
}
# - local is a special provider that is baked into the system and
# has no loginReference
if provider != 'local':
login_ref = self.get_login_ref(provider)
payload.update(login_ref)
session = iControlRestSession()
session.verify = self.provider['validate_certs']
response = session.post(url, json=payload)
if response.status not in [200]:
raise F5ModuleError('Status code: {0}. Unexpected Error: {1} for uri: {2}\nText: {3}'.format(
response.status, response.reason, response.url, response.content
))
session.headers['X-F5-Auth-Token'] = response.json()['token']['token']
self._client = session
return self._client
except Exception as ex:
exc = ex
time.sleep(1)
error = 'Unable to connect to {0} on port {1}.'.format(
self.provider['server'], self.provider['server_port']
)
if exc is not None:
error += ' The reported error was "{0}".'.format(str(exc))
raise F5ModuleError(error)
def get_login_ref(self, provider):
info = self.read_provider_info_from_device()
uuids = [os.path.basename(os.path.dirname(x['link'])) for x in info['providers'] if '-' in x['link']]
if provider in uuids:
name = self.get_name_of_provider_id(info, provider)
if not name:
raise F5ModuleError(
"No name found for the provider '{0}'".format(provider)
)
return dict(
loginReference=dict(
link="https://localhost/mgmt/cm/system/authn/providers/{0}/{1}/login".format(name, provider)
)
)
names = [os.path.basename(os.path.dirname(x['link'])) for x in info['providers'] if '-' in x['link']]
if names.count(provider) > 1:
raise F5ModuleError(
"Ambiguous auth_provider provided. Please specify a specific provider ID."
)
uuid = self.get_id_of_provider_name(info, provider)
if not uuid:
raise F5ModuleError(
"No name found for the provider '{0}'".format(provider)
)
return dict(
loginReference=dict(
link="https://localhost/mgmt/cm/system/authn/providers/{0}/{1}/login".format(provider, uuid)
)
)
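    # Rough shape of the returned structure (the provider name and UUID
    # below are purely illustrative):
    #   {'loginReference': {'link': 'https://localhost/mgmt/cm/system/authn/'
    #                               'providers/tmos/<uuid>/login'}}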
def get_name_of_provider_id(self, info, provider):
# Add slashes to the provider name so that it specifically finds the provider
# as part of the URL and not a part of another substring
provider = '/' + provider + '/'
for x in info['providers']:
if x['link'].find(provider) > -1:
return x['name']
return None
def get_id_of_provider_name(self, info, provider):
for x in info['providers']:
if x['name'] == provider:
return os.path.basename(os.path.dirname(x['link']))
return None
def read_provider_info_from_device(self):
uri = "https://{0}:{1}/info/system".format(
self.provider['server'], self.provider['server_port']
)
session = iControlRestSession()
session.verify = self.provider['validate_certs']
resp = session.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
| gpl-3.0 | 5,540,900,257,796,182,000 | 36.983051 | 113 | 0.55972 | false |
alex/pip | pip/index.py | 3 | 38101 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, canonicalize_name)
from pip.utils.deprecation import RemovedInPip9Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
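# For example (URLs are illustrative): with the defaults above,
# "https://pypi.python.org" and "http://localhost:8080" count as secure
# origins, while a plain "http://example.com" index is skipped with a
# warning unless the host is allowed via --trusted-host.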
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
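    # Sketch of the resulting split (paths are hypothetical): a local
    # directory passed with expand_dir=True is walked and each entry is
    # routed by MIME type, so an 'index.html' lands in urls while archives
    # such as 'pkg-1.0.tar.gz' land in files.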
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
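    # Illustrative sort keys (version and tag index are hypothetical): with
    # the reverse sort in _sort_versions, ties on version are broken by pri,
    # so a well-matching wheel beats an sdist:
    #   (Version('1.2'), -3)                    # wheel, support_index_min() == 3
    #   (Version('1.2'), -len(supported_tags))  # sdist, always the minimum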
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_link_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port patches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, urllib_parse.quote(project_name.lower()))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in _versions
]
applicable_candidates = self._sort_versions(applicable_candidates)
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and not applicable_candidates:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
not applicable_candidates or
applicable_candidates[0].version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
applicable_candidates[0].version,
)
return None
if best_installed:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(str(c.version) for c in applicable_candidates) or
"none",
)
raise BestVersionAlreadyInstalled
selected_candidate = applicable_candidates[0]
logger.debug(
'Using version %s (newest of versions: %s)',
selected_candidate.version,
', '.join(str(c.version) for c in applicable_candidates)
)
return selected_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
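# Behaviour sketch (values are illustrative):
#   egg_info_matches('pip-6.0.8', 'pip', link)  -> '6.0.8'
#   egg_info_matches('foo-2-2', 'foo', link)    -> '2-2'   (the ambiguous case)
#   egg_info_matches('bar-1.0', 'foo', link)    -> None    (name mismatch)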
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
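    # e.g. (illustrative URL): clean_link('https://host/a b#frag') is
    # expected to yield 'https://host/a%20b#frag', leaving existing
    # '%'-escapes and '#' untouched.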
class Link(object):
def __init__(self, url, comes_from=None):
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', DeprecationWarning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| mit | -2,887,185,659,788,923,000 | 35.012287 | 79 | 0.553004 | false |
faroit/mir_eval | tests/test_sonify.py | 1 | 3340 | """ Unit tests for sonification methods """
import mir_eval
import numpy as np
import scipy
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs, length=fs*11)
assert len(signal) == 11*fs
def test_pitch_contour():
# Generate some random pitch
fs = 8000
times = np.linspace(0, 5, num=5 * fs, endpoint=True)
noise = scipy.ndimage.gaussian_filter1d(np.random.randn(len(times)),
sigma=256)
freqs = 440.0 * 2.0**(16 * noise)
# negate a bunch of sequences
idx = np.unique(np.random.randint(0, high=len(times), size=32))
for start, end in zip(idx[::2], idx[1::2]):
freqs[start:end] *= -1
# Test with inferring duration
x = mir_eval.sonify.pitch_contour(times, freqs, fs)
assert len(x) == fs * 5
# Test with an explicit duration
# This forces the interpolator to go off the end of the sampling grid,
# which should result in a constant sequence in the output
x = mir_eval.sonify.pitch_contour(times, freqs, fs, length=fs * 7)
assert len(x) == fs * 7
assert np.allclose(x[-fs * 2:], x[-fs * 2])
# Test with an explicit duration and a fixed offset
# This forces the interpolator to go off the beginning of
# the sampling grid, which should result in a constant output
x = mir_eval.sonify.pitch_contour(times + 5.0, freqs, fs, length=fs * 7)
assert len(x) == fs * 7
assert np.allclose(x[:fs * 5], x[0])
| mit | -2,593,659,800,626,248,700 | 36.111111 | 77 | 0.577545 | false |
crakensio/django_training | lib/python2.7/site-packages/django/contrib/auth/tests/test_auth_backends.py | 97 | 19207 | from __future__ import unicode_literals
from datetime import date
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.tests.custom_user import ExtensionUser, CustomPermissionsUser, CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.contrib.auth import authenticate, get_user
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth.hashers import MD5PasswordHasher
class CountingMD5PasswordHasher(MD5PasswordHasher):
"""Hasher that counts how many times it computes a hash."""
calls = 0
def encode(self, *args, **kwargs):
type(self).calls += 1
return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)
class BaseModelBackendTest(object):
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.create_users()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == set(['auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), set(['auth.test2', 'auth.test', 'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = set(['auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set(['auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
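    # Note: permission strings take the dotted "<app_label>.<codename>" form,
    # which is why the bare 'test' lookups in this test fail while
    # 'auth.test' succeeds.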
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.tests.test_auth_backends.CountingMD5PasswordHasher',))
def test_authentication_timing(self):
"""Hasher is run once regardless of whether the user exists. Refs #20760."""
        # Re-set the password, because this test overrides PASSWORD_HASHERS
self.user.set_password('test')
self.user.save()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
authenticate(username=username, password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
authenticate(username='no_such_user', password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
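# A minimal sketch (not Django code) of the constant-time pattern the test
# above verifies: hash a dummy password when the username is unknown, so the
# hasher runs exactly once on both paths. All names here are illustrative.
def _timing_safe_check_sketch(lookup_user, check_password, username, password):
    user = lookup_user(username)           # returns None when no such user
    if user is None:
        check_password(password, None)     # burn one hash to equalise timing
        return None
    return user if check_password(password, user.password) else None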
@skipIfCustomUser
class ModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
def create_users(self):
self.user = User.objects.create_user(
username='test',
email='[email protected]',
password='test',
)
self.superuser = User.objects.create_superuser(
username='test2',
email='[email protected]',
password='test',
)
@override_settings(AUTH_USER_MODEL='auth.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the custom ExtensionUser model.
This isn't a perfect test, because both the User and ExtensionUser are
    synchronized to the database, which wouldn't ordinarily happen in
production. As a result, it doesn't catch errors caused by the non-
existence of the User table.
The specific problem is queries on .filter(groups__user) et al, which
makes an implicit assumption that the user model is called 'User'. In
production, the auth.User table won't exist, so the requested join
won't exist either; in testing, the auth.User *does* exist, and
so does the join. However, the join table won't contain any useful
data; for testing, we check that the data we expect actually does exist.
"""
UserModel = ExtensionUser
def create_users(self):
self.user = ExtensionUser._default_manager.create_user(
username='test',
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = ExtensionUser._default_manager.create_superuser(
username='test2',
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
    which wouldn't ordinarily happen in production.
"""
UserModel = CustomPermissionsUser
def create_users(self):
self.user = CustomPermissionsUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = CustomPermissionsUser._default_manager.create_superuser(
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
"""
    Tests that the model backend can accept a credentials kwarg labeled with the
custom user model's USERNAME_FIELD.
"""
def test_authenticate(self):
test_user = CustomUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
authenticated_user = authenticate(email='[email protected]', password='test')
self.assertEqual(test_user, authenticated_user)
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
return True
elif not user.is_active and perm == 'inactive':
return True
return False
def has_module_perms(self, user, app_label):
if not user.is_anonymous() and not user.is_active:
return False
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
@skipIfCustomUser
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user2 = User.objects.create_user('test2', '[email protected]', 'test')
self.user3 = User.objects.create_user('test3', '[email protected]', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The get_group_permissions test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend.
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
@skipIfCustomUser
@override_settings(AUTHENTICATION_BACKENDS=[])
class NoBackendsTest(TestCase):
"""
Tests that an appropriate error is raised if no auth backends are provided.
"""
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'test')
def test_raises_exception(self):
self.assertRaises(ImproperlyConfigured, self.user.has_perm, ('perm', TestObj(),))
@skipIfCustomUser
class InActiveUserBackendTest(TestCase):
"""
    Tests for an inactive user
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user1.is_active = False
self.user1.save()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
class PermissionDeniedBackend(object):
"""
Always raises PermissionDenied.
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username=None, password=None):
raise PermissionDenied
@skipIfCustomUser
class PermissionDeniedBackendTest(TestCase):
"""
Tests that other backends are not checked once a backend raises PermissionDenied
"""
backend = 'django.contrib.auth.tests.test_auth_backends.PermissionDeniedBackend'
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user1.save()
@override_settings(AUTHENTICATION_BACKENDS=(backend, ) +
tuple(settings.AUTHENTICATION_BACKENDS))
def test_permission_denied(self):
"user is not authenticated after a backend raises permission denied #2550"
self.assertEqual(authenticate(username='test', password='test'), None)
@override_settings(AUTHENTICATION_BACKENDS=tuple(
settings.AUTHENTICATION_BACKENDS) + (backend, ))
def test_authenticates(self):
self.assertEqual(authenticate(username='test', password='test'), self.user1)
class NewModelBackend(ModelBackend):
pass
@skipIfCustomUser
class ChangedBackendSettingsTest(TestCase):
"""
Tests for changes in the settings.AUTHENTICATION_BACKENDS
"""
backend = 'django.contrib.auth.tests.test_auth_backends.NewModelBackend'
TEST_USERNAME = 'test_user'
TEST_PASSWORD = 'test_password'
TEST_EMAIL = '[email protected]'
def setUp(self):
User.objects.create_user(self.TEST_USERNAME,
self.TEST_EMAIL,
self.TEST_PASSWORD)
@override_settings(AUTHENTICATION_BACKENDS=(backend, ))
def test_changed_backend_settings(self):
"""
        Tests that removing a backend configured in AUTHENTICATION_BACKENDS
        makes already logged-in users disconnect.
"""
# Get a session for the test user
self.assertTrue(self.client.login(
username=self.TEST_USERNAME,
password=self.TEST_PASSWORD)
)
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
# Remove NewModelBackend
with self.settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',)):
# Get the user from the request
user = get_user(request)
            # Assert that the user retrieval is successful and the user is
            # anonymous as the backend is no longer available.
self.assertIsNotNone(user)
self.assertTrue(user.is_anonymous())
@skipIfCustomUser
class ImproperlyConfiguredUserModelTest(TestCase):
"""
Tests that an exception from within get_user_model is propagated and doesn't
raise an UnboundLocalError.
Regression test for ticket #21439
"""
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.client.login(
username='test',
password='test'
)
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_does_not_shadow_exception(self):
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
self.assertRaises(ImproperlyConfigured, get_user, request)
| cc0-1.0 | -101,636,079,758,617,650 | 36.958498 | 116 | 0.660957 | false |
kunaltyagi/nsiqcppstyle | nsiqcppstyle.py | 1 | 2624 | #!/usr/bin/env python
#
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import csv # @UnusedImport
import urllib # @UnusedImport
try:
import hashlib # @UnusedImport
except ImportError:
import md5 # @UnusedImport
import unittest # @UnusedImport
import platform # @UnusedImport
import sre_compile # @UnusedImport
import shutil # @UnusedImport
def WeAreFrozen():
return hasattr(sys, "frozen")
def ModulePath():
if WeAreFrozen():
return os.path.dirname(
unicode(sys.executable, sys.getfilesystemencoding()))
return os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
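# Note: sys.frozen is set by freezing tools such as py2exe/cx_Freeze; when
# frozen, __file__ is unreliable, hence the sys.executable fallback above
# (an assumption about those tools' behaviour, not verified here).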
def GetRuntimePath():
"Return the path of this tool"
if (sys.platform == "win32"):
runtimePath = ModulePath()
else:
modename = globals()['__name__']
module = sys.modules[modename]
runtimePath = os.path.dirname(module.__file__)
return runtimePath
if __name__ == "__main__":
sys.path.append(GetRuntimePath())
module = __import__("nsiqcppstyle_exe")
sys.exit(module.main())
| gpl-2.0 | 7,175,608,701,901,995,000 | 36.028986 | 74 | 0.714558 | false |
dmsurti/mayavi | mayavi/sources/chaco_reader.py | 3 | 2647 | """A Chaco file reader.
"""
# Author: Suyog Dutt Jain <[email protected]>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Str
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports.
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `ChacoReader` class
########################################################################
class ChacoReader(Source):
"""A Chaco reader.
"""
# The version of this class. Used for persistence.
__version__ = 0
base_name = Str('', desc='basename of the Chaco files')
# The VTK data file reader.
reader = Instance(tvtk.ChacoReader, args=(), allow_none=False,
record=True)
# Information about what this object can produce.
output_info = PipelineInfo(datasets=['unstructured_grid'])
########################################
# View related code.
# Our view.
view = View(Group(Item(name='reader', style='custom',
resizable=True),
show_labels=False),
resizable=True)
######################################################################
# `FileDataSource` interface
######################################################################
def __init__(self, base_name='', configure=True, **traits):
super(ChacoReader, self).__init__(**traits)
if configure:
self.reader.edit_traits(kind='livemodal')
self.base_name = self.reader.base_name
def update(self):
if len(self.base_name) == 0:
return
self.reader.update()
self.render()
def has_output_port(self):
""" Return True as the reader has output port."""
return True
def get_output_object(self):
""" Return the reader output port."""
return self.reader.output_port
######################################################################
# Non-public interface
######################################################################
def _base_name_changed(self, value):
if len(value) == 0:
return
else:
self.reader.base_name = value
self._update_reader_output()
def _update_reader_output(self):
self.reader.update()
self.reader.update_information()
self.reader.on_trait_change(self.render)
self.outputs = [self.reader.output]
self.data_changed = True
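# Usage sketch (the base name below is an illustrative assumption):
#   src = ChacoReader(configure=False)
#   src.base_name = '/data/chaco/mesh'   # triggers _base_name_changed above
#   src.update()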
| bsd-3-clause | -3,196,526,411,355,711,000 | 31.280488 | 74 | 0.496789 | false |
ZaraSeries/repo | script.module.urlresolver/lib/urlresolver/plugins/streaminto.py | 3 | 2808 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from lib import helpers
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
class StreamintoResolver(UrlResolver):
name = "streaminto"
domains = ["streamin.to"]
pattern = '(?://|\.)(streamin\.to)/(?:embed-|)?([0-9A-Za-z]+)'
def __init__(self):
self.net = common.Net()
self.headers = {'User-Agent': common.SMU_USER_AGENT}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'Referer': web_url}
headers.update(self.headers)
html = self.net.http_GET(web_url, headers=headers).content
sources = helpers.scrape_sources(html, patterns=["""file:\s*["'](?P<url>[^"']+)"""])
if sources:
auth = self.__check_auth(media_id)
if not auth:
auth = self.__auth_ip(media_id)
if auth:
return helpers.pick_source(sources) + helpers.append_headers(headers)
else:
raise ResolverError(i18n('no_ip_authorization'))
else:
raise ResolverError('Unable to locate links')
def __auth_ip(self, media_id):
header = i18n('stream_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair') % ('http://api.streamin.to/pair')
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
common.logger.log('Checking Auth: %s' % (media_id))
url = 'http://api.streamin.to/pair/check.php'
try: js_result = json.loads(self.net.http_GET(url, headers=self.headers).content)
except ValueError: raise ResolverError('Unusable Authorization Response')
common.logger.log('Auth Result: %s' % (js_result))
return js_result.get('status') == 200
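    # Pairing flow, as implied above: check.php is polled until the user has
    # visited api.streamin.to/pair from this IP; a JSON {"status": 200} reply
    # then authorises the stream (inferred behaviour, not a documented API).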
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
| gpl-2.0 | -8,922,828,102,668,085,000 | 40.294118 | 92 | 0.630698 | false |
arunlodhi/pymc3 | pymc3/interactive_sampling.py | 10 | 4699 | try:
__IPYTHON__
import IPython
import ipywidgets as widgets
from IPython.core import display
from traitlets import Unicode, Integer, Float
import json
from numpy.random import seed
import time
from .backends.base import MultiTrace
from .sampling import _iter_sample
except (NameError, ImportError):
IPython = False
_no_notebook_error_message = "nbsample can only be run inside IPython Notebook."
if IPython:
__all__ = ['nbsample']
_javascript = """<script type="text/javascript">
require(["widgets/js/widget"], function(WidgetManager){
var ISampleWidget = IPython.WidgetView.extend({
render: function(){
var html = $("<table style='width:100%;'><tr><td style='width:60px'><button>Stop</button></td>"+
"<td class='pymc3-clock' style='width:60px'></td>"+
"<td class='pymc3-progress'>"+
"<div class='bar' style='width:0px; height: 20px; "+
"background-image: linear-gradient(to bottom, #dddddd 0%,#111111 100%)"+
"'> </div></td>"+
"<td class='pymc3-current-samples' style='width:60px;'>0</td>"+
"<td style='width:10px;'>/</td>"+
"<td style='width:60px;' class='pymc3-max-samples'></td>"+
"</tr>"+
"</table>");
this.setElement(html);
this.$el.find("button").click($.proxy(function(){
this.send("stop","stop");
this.$el.find("button").attr("disabled", "disabled");
}, this));
this.model.on('change:max_samples', function(model, value){
this.$el.find(".pymc3-max-samples").text(value);
}, this);
this.model.on('change:clock', function(model, value){
this.$el.find(".pymc3-clock").text(value);
}, this);
this.model.on('change:current_samples', function(model, value){
this.$el.find(".pymc3-current-samples").text(value);
var total_width = this.$el.find(".pymc3-progress").width()-5;
var total_samples = this.model.get("max_samples");
var width = value * total_width / total_samples;
this.$el.find(".pymc3-progress .bar").width(width)
}, this);
}
});
WidgetManager.register_widget_view('ISampleWidget', ISampleWidget)
});
</script>
"""
class ISampleWidget(widgets.DOMWidget):
_view_name = Unicode('ISampleWidget', sync=True)
current_samples = Integer(sync=True)
max_samples = Integer(sync=True)
clock = Unicode(sync=True)
def __init__(self, *args, **kwargs):
widgets.DOMWidget.__init__(self,*args, **kwargs)
self.iteration = 0
self.on_msg(self._handle_custom_msg)
self.send_state()
self.stopped = False
def _handle_custom_msg(self, message):
if message == "stop":
self.stopped = True
def nbsample(draws, step, start=None, trace=None, chain=0, tune=None, model=None, random_seed=None):
try:
assert(hasattr(IPython.get_ipython(), 'comm_manager'))
except (AssertionError, NameError, KeyError) as e:
raise NotImplementedError(_no_notebook_error_message)
display.display_html(_javascript, raw=True)
w = ISampleWidget()
display.display(w)
t_start = time.time()
t_last = time.time()
w.max_samples = draws
w.current_samples = 0
sampling = _iter_sample(draws, step, start=start, trace=trace,
chain=chain, tune=tune, model=model,
random_seed=random_seed)
for i, trace in enumerate(sampling, 1):
elapsed = time.time() - t_start
elapsed_last = time.time() - t_last
if elapsed_last > 0.1:
t_last = time.time()
w.current_samples = i
w.clock = "%02i:%02i:%02i" % (elapsed / 60 / 60, elapsed / 60 % 60, elapsed % 60)
get_ipython().kernel.do_one_iteration()
if w.stopped:
trace.close()
break
w.current_samples = i
return MultiTrace([trace])
else:
    def nbsample(*args, **kwargs):
        raise NotImplementedError(_no_notebook_error_message)
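# Usage sketch (illustrative; assumes a pymc3 model is in scope):
#   with pm.Model():
#       ...  # define variables
#       trace = nbsample(2000, step=pm.Metropolis())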
| apache-2.0 | -410,070,160,164,894,400 | 40.955357 | 112 | 0.51628 | false |
dnozay/lettuce | tests/integration/lib/Django-1.3/django/core/servers/fastcgi.py | 289 | 6402 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
print FASTCGI_HELP
if message:
print message
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
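# Example invocations (illustrative, mirroring FASTCGI_HELP above):
#   runfastcgi(["method=threaded"])
#   runfastcgi(["protocol=scgi", "method=prefork", "host=127.0.0.1",
#               "port=8025"])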
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| gpl-3.0 | 1,722,171,115,303,504,600 | 33.983607 | 95 | 0.628085 | false |
gdgellatly/OCB1 | addons/delivery/partner.py | 57 | 1474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'property_delivery_carrier': fields.property(
'delivery.carrier',
type='many2one',
relation='delivery.carrier',
string="Delivery Method",
view_load=True,
help="This delivery method will be used when invoicing from picking."),
}
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,834,567,405,601,809,200 | 35.85 | 81 | 0.609227 | false |
swilcox/pinax-blog | pinax/blog/admin.py | 3 | 2677 | from django.contrib import admin
from django.utils import timezone
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from .forms import AdminPostForm
from .models import Post, Image, ReviewComment, Section
from .utils import can_tweet
class ImageInline(admin.TabularInline):
model = Image
fields = ["image_path"]
class ReviewInline(admin.TabularInline):
model = ReviewComment
def make_published(modeladmin, request, queryset):
queryset = queryset.exclude(state=Post.STATE_CHOICES[-1][0], published__isnull=False)
queryset.update(state=Post.STATE_CHOICES[-1][0])
queryset.filter(published__isnull=True).update(published=timezone.now())
make_published.short_description = _("Publish selected posts")
class PostAdmin(admin.ModelAdmin):
list_display = ["title", "state", "section", "published", "show_secret_share_url"]
list_filter = ["section", "state"]
form = AdminPostForm
actions = [make_published]
fields = [
"section",
"title",
"slug",
"author",
"markup",
"teaser",
"content",
"description",
"primary_image",
"sharable_url",
"state"
]
readonly_fields = ["sharable_url"]
if can_tweet():
fields.append("tweet")
prepopulated_fields = {"slug": ("title",)}
inlines = [
ImageInline,
ReviewInline,
]
def show_secret_share_url(self, obj):
return '<a href="%s">%s</a>' % (obj.sharable_url, obj.sharable_url)
show_secret_share_url.short_description = _("Share this url")
show_secret_share_url.allow_tags = True
def formfield_for_dbfield(self, db_field, **kwargs):
request = kwargs.get("request")
if db_field.name == "author":
ff = super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
ff.initial = request.user.id
return ff
return super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_form(self, request, obj=None, **kwargs):
kwargs.update({
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
})
return super(PostAdmin, self).get_form(request, obj, **kwargs)
def save_form(self, request, form, change):
# this is done for explicitness that we want form.save to commit
# form.save doesn't take a commit kwarg for this reason
return form.save()
class SectionAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Post, PostAdmin)
admin.site.register(Image)
admin.site.register(Section, SectionAdmin)
| mit | -7,627,998,283,907,405,000 | 30.127907 | 89 | 0.650355 | false |
gengjiawen/AndroidHelper | aar_util/aar_util.py | 1 | 1841 | import os
import shutil
import tempfile
import zipfile
from utils.file_util import get_files_by_re, gen_new_file_extension
def get_aar_files(proj_dir, des_dir):
rel_aar_dir = r"build\outputs\aar"
aar_dirs = [os.path.join(proj_dir, i) for i in os.listdir(proj_dir) if os.path.isdir(os.path.join(proj_dir, i))]
aar_dirs = [os.path.join(i, rel_aar_dir) for i in aar_dirs if os.path.exists(os.path.join(i, rel_aar_dir))]
for i in aar_dirs:
file = os.listdir(i)[0]
debug_aar = os.path.join(i, file)
print(debug_aar)
os.makedirs(des_dir, exist_ok=True)
shutil.copyfile(debug_aar, os.path.join(des_dir, file))
def using_local_aar(aar_dir):
# http://stackoverflow.com/a/24894387/1713757
# or you can just do it in android studio ui
    s = 'configurations.maybeCreate("default")\n'
for i in os.listdir(aar_dir):
if i.endswith("aar"):
print("aar:", i)
t = "artifacts.add(\"default\", file('{}'))\n".format(i)
s += t
print(s)
build_script = os.path.join(aar_dir, "build.gradle")
open(build_script, mode='w', encoding='utf-8').write(s)
aar_module_name = os.path.basename(aar_dir)
print("add this to setting.gradle: ")
print("include ':{}'".format(aar_module_name))
print("\nadd this to mudule using aars: ")
print("compile project(':{}')".format(aar_module_name))
def extract_aar2jar(aar_dir):
aar_files = get_files_by_re(aar_dir, ".*aar")
for i in aar_files:
jar_name = gen_new_file_extension(i, "jar")
with zipfile.ZipFile(i, "r") as z:
temp_dir = tempfile.mkdtemp()
z.extract("classes.jar", temp_dir)
if os.path.exists(jar_name):
os.remove(jar_name)
shutil.move(os.path.join(temp_dir, "classes.jar"), jar_name)
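# Usage sketch (paths are illustrative assumptions, not project defaults):
#   get_aar_files(r"C:\projects\app", r"C:\out\aars")
#   using_local_aar(r"C:\out\aars")
#   extract_aar2jar(r"C:\out\aars")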
| mit | 1,090,207,391,093,984,800 | 35.82 | 116 | 0.612167 | false |
SysTheron/adhocracy | src/adhocracy/websetup.py | 2 | 1955 | """Setup the adhocracy application"""
import logging
import os
import os.path
import pylons
import pylons.test
from pylons import config
import migrate.versioning.api as migrateapi
try:
from migrate.versioning.exceptions import DatabaseAlreadyControlledError
from migrate.versioning.exceptions import DatabaseNotControlledError
except ImportError:
# location changed in 0.6.1
from migrate.exceptions import DatabaseAlreadyControlledError
from migrate.exceptions import DatabaseNotControlledError
from adhocracy.config.environment import load_environment
from adhocracy.lib import install
from adhocracy.model import meta
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup adhocracy here"""
if not pylons.test.pylonsapp:
conf = load_environment(conf.global_conf, conf.local_conf, with_db=False)
_setup(conf)
def _setup(config):
# disable delayed execution
# config['adhocracy.amqp.host'] = None
# FIXME: still do this with rq instead of rabbitmq
# NOTE: this is called from tests so it may have side effects
# Create the tables if they don't already exist
url = config.get('sqlalchemy.url')
migrate_repo = os.path.join(os.path.dirname(__file__), 'migration')
repo_version = migrateapi.version(migrate_repo)
if config.get('adhocracy.setup.drop', "OH_NOES") == "KILL_EM_ALL":
meta.data.drop_all(bind=meta.engine)
meta.engine.execute("DROP TABLE IF EXISTS migrate_version")
try:
db_version = migrateapi.db_version(url, migrate_repo)
if db_version < repo_version:
migrateapi.upgrade(url, migrate_repo)
initial_setup = False
except DatabaseNotControlledError:
meta.data.create_all(bind=meta.engine)
migrateapi.version_control(url, migrate_repo, version=repo_version)
initial_setup = True
install.setup_entities(config, initial_setup)
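    # Control flow recap: an uncontrolled database gets the full schema and is
    # stamped with the current repository version; a controlled but outdated
    # database is upgraded in place; 'KILL_EM_ALL' drops everything first,
    # including the migrate_version bookkeeping table.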
| agpl-3.0 | 196,285,035,216,708,300 | 32.135593 | 81 | 0.725831 | false |
JFriel/honours_project | venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.py | 117 | 2556 | #!/usr/bin/env python
"""Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
class TestFFTShift(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
class TestFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
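# For reference: fft.fftfreq(n, d) lays out sample frequencies as
# [0, 1, ..., (n-1)//2, -(n//2), ..., -1] / (n*d), which is exactly what the
# hand-written lists above encode for n = 9 and n = 10.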
class TestRFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN(TestCase):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -3,015,139,154,835,288,000 | 31.769231 | 79 | 0.572379 | false |
slundberg/Mocha.jl | docs/conf.py | 23 | 8570 | # -*- coding: utf-8 -*-
#
# Mocha documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 00:43:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("sphinx"))
import julia
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'julia'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mocha'
copyright = u'2014, pluskid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.9'
# The full version, including alpha/beta/rc tags.
release = '0.0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'jl'
highlight_language = 'julia'
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mochadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Mocha.tex', u'Mocha Documentation',
u'pluskid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mocha', u'Mocha Documentation',
[u'pluskid'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Mocha', u'Mocha Documentation',
u'pluskid', 'Mocha', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 1,705,881,141,127,873,800 | 29.938628 | 79 | 0.703034 | false |
waymarkedtrails/waymarked-trails-site | db/tables/route_nodes.py | 2 | 3914 | # This file is part of the Waymarked Trails Map Project
# Copyright (C) 2015 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Various tables for nodes in a route network.
"""
from re import compile as re_compile
import sqlalchemy as sa
from geoalchemy2 import Geometry
from osgende.generic import TransformedTable
from osgende.common.tags import TagStore
from db.configs import GuidePostConfig, NetworkNodeConfig
from db import conf
GUIDEPOST_CONF = conf.get('GUIDEPOSTS', GuidePostConfig)
class GuidePosts(TransformedTable):
""" Information about guide posts. """
elepattern = re_compile('[\\d.]+')
def __init__(self, meta, source, updates):
self.srid = meta.info.get('srid', source.c.geom.type.srid)
super().__init__(meta, GUIDEPOST_CONF.table_name, source)
self.updates = updates
def add_columns(self, table, src):
table.append_column(sa.Column('name', sa.String))
table.append_column(sa.Column('ele', sa.String))
table.append_column(sa.Column('geom', Geometry('POINT', srid=self.srid)))
def before_update(self, engine):
# save all added guideposts
sql = sa.except_(sa.select([self.src.c.geom.ST_Transform(self.srid)])
.where(self.src.c.id.in_(self.src.select_add_modify())),
sa.select([self.c.geom])
.where(self.c.id.in_(self.src.select_add_modify())))
self.updates.add_from_select(engine, sql)
def transform(self, obj):
tags = TagStore(obj['tags'])
# filter by subtype
if GUIDEPOST_CONF.subtype is not None:
booltags = tags.get_booleans()
if len(booltags) > 0:
if not booltags.get(GUIDEPOST_CONF.subtype, False):
return None
else:
if GUIDEPOST_CONF.require_subtype:
return None
outtags = { 'name' : tags.get('name'), 'ele' : None }
        if 'ele' in tags:
m = self.elepattern.search(tags['ele'])
if m:
outtags['ele'] = m.group(0)
# XXX check for ft
if self.srid == self.src.c.geom.type.srid:
outtags['geom'] = obj['geom']
else:
outtags['geom'] = obj['geom'].ST_Transform(self.srid)
return outtags
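    # Note: elepattern grabs the first bare number from free-form 'ele' tags,
    # e.g. '1203.5 m' -> '1203.5'; unit handling (see the XXX above for ft)
    # is deliberately left undone here.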
NETWORKNODE_CONF = conf.get('NETWORKNODES', NetworkNodeConfig)
class NetworkNodes(TransformedTable):
""" Information about referenced nodes in a route network.
"""
def __init__(self, meta, source):
self.srid = meta.info.get('srid', source.c.geom.type.srid)
super().__init__(meta, NETWORKNODE_CONF.table_name, source)
def add_columns(self, table, src):
table.append_column(sa.Column('name', sa.String))
table.append_column(sa.Column('geom', Geometry('POINT', srid=self.srid)))
def transform(self, obj):
tags = TagStore(obj['tags'])
if NETWORKNODE_CONF.node_tag not in tags:
return None
outtags = { 'name' : tags[NETWORKNODE_CONF.node_tag] }
if self.srid == self.src.c.geom.type.srid:
outtags['geom'] = obj['geom']
else:
outtags['geom'] = obj['geom'].ST_Transform(self.srid)
return outtags
| gpl-3.0 | -8,747,056,517,793,661,000 | 34.908257 | 83 | 0.629024 | false |
Yawning/or-applet | orapplet/status_icon.py | 1 | 12289 | #!/usr/bin/env python2
# This file is part of or-applet.
#
# or-applet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# or-applet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with or-applet. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen
from gi.repository import Gtk,Gdk
from stem import CircBuildFlag,CircClosureReason,CircPurpose,CircStatus,HiddenServiceState
from orapplet.utils import get_leek_icon
def _pos(menu, x, y, icon):
    return Gtk.StatusIcon.position_menu(menu, x, y, icon)
def _format_circuit(circuit):
s = 'Circuit: ' + circuit.id + '\n'
s += 'Created: ' + str(circuit.created) + '\n'
s += 'Status: ' + _format_status(circuit.status) + '\n'
s += 'Purpose: ' + _format_purpose(circuit.purpose) + '\n'
s += 'Flags: \n' + _format_build_flags(circuit.build_flags)
if circuit.hs_state is not None:
s += 'HS State: ' + _format_hs_state(circuit.hs_state) + '\n'
if circuit.path is not None and circuit.path:
s += 'Path:\n'
s += _format_path(circuit.path)
if circuit.reason is not None:
        s += 'Local Close Reason: ' + _format_close_reason(circuit.reason) + '\n'
if circuit.remote_reason is not None:
        s += 'Remote Close Reason: ' + _format_close_reason(circuit.remote_reason) + '\n'
return s
_FORMAT_STATUSES = {
CircStatus.LAUNCHED: 'LAUNCHED (circuit ID assigned to new circuit)',
CircStatus.BUILT: 'BUILT (all hops finished, can now accept streams)',
CircStatus.EXTENDED: 'EXTENDED (one more hop has been completed)',
CircStatus.FAILED: 'FAILED (circuit closed (was not built))',
CircStatus.CLOSED: 'CLOSED (circuit closed (was built))'
}
def _format_status(status):
return _FORMAT_STATUSES.get(status, str(status))
_FORMAT_PURPOSES = {
CircPurpose.GENERAL: 'GENERAL (circuit for AP and/or directory request streams)',
CircPurpose.HS_CLIENT_INTRO: 'HS_CLIENT_INTRO (HS client-side introduction-point circuit)',
CircPurpose.HS_CLIENT_REND: 'HS_CLIENT_REND (HS client-side rendezvous circuit; carries AP streams)',
CircPurpose.HS_SERVICE_INTRO: 'HS_SERVICE_INTRO (HS service-side introduction-point circuit)',
CircPurpose.HS_SERVICE_REND: 'HS_SERVICE_REND (HS service-side rendezvous circuit)',
CircPurpose.TESTING: 'TESTING (reachability-testing circuit; carries no traffic)',
CircPurpose.CONTROLLER: 'CONTROLLER (circuit built by a controller)',
CircPurpose.MEASURE_TIMEOUT: 'MEASURE_TIMEOUT (circuit being kept around to see how long it takes)'
}
_HS_PURPOSES = [
CircPurpose.HS_CLIENT_INTRO,
CircPurpose.HS_CLIENT_REND,
CircPurpose.HS_SERVICE_INTRO,
CircPurpose.HS_SERVICE_REND
]
def _filter_circuit(circuit):
if CircBuildFlag.IS_INTERNAL in circuit.build_flags:
return circuit.purpose in _HS_PURPOSES
return True
def _format_purpose(purpose):
return _FORMAT_PURPOSES.get(purpose, str(purpose))
_FORMAT_FLAGS = {
CircBuildFlag.ONEHOP_TUNNEL: 'ONEHOP_TUNNEL (one-hop circuit, used for tunneled directory conns)',
CircBuildFlag.IS_INTERNAL: 'IS_INTERNAL (internal circuit, not to be used for exiting streams)',
CircBuildFlag.NEED_CAPACITY: 'NEED_CAPACITY (this circuit must use only high-capacity nodes)',
CircBuildFlag.NEED_UPTIME: 'NEED_UPTIME (this circuit must use only high-uptime nodes)'
}
def _format_build_flags(flags):
s_list = []
for flag in flags:
s_list.append(' %s\n' % _FORMAT_FLAGS.get(flag, str(flag)))
return ''.join(s_list)
def _format_path(path):
s_list = []
idx = 0
for hop in path:
s_list.append(' [%d]: %s~%s\n' % (idx, hop[0], hop[1]))
idx += 1
return ''.join(s_list)
_FORMAT_HS_STATE = {
HiddenServiceState.HSCI_CONNECTING: 'HSCI_CONNECTING (connecting to intro point)',
HiddenServiceState.HSCI_INTRO_SENT: 'HSCI_INTRO_SENT (sent INTRODUCE1; waiting for reply from IP)',
HiddenServiceState.HSCI_DONE: 'HSCI_DONE (received reply from IP relay; closing)',
HiddenServiceState.HSCR_CONNECTING: 'HSCR_CONNECTING (connecting to or waiting for reply from RP)',
HiddenServiceState.HSCR_ESTABLISHED_IDLE: 'HSCR_ESTABLISHED_IDLE (established RP; waiting for introduction)',
HiddenServiceState.HSCR_ESTABLISHED_WAITING: 'HSCR_ESTABLISHED_WAITING (introduction sent to HS; waiting for rend)',
HiddenServiceState.HSCR_JOINED: 'HSCR_JOINED (connected to HS)',
HiddenServiceState.HSSI_CONNECTING: 'HSSI_CONNECTING (connecting to intro point)',
HiddenServiceState.HSSI_ESTABLISHED: 'HSSI_ESTABLISHED (established intro point)',
HiddenServiceState.HSSR_CONNECTING: 'HSSR_CONNECTING (connecting to client\'s rend point)',
HiddenServiceState.HSSR_JOINED: 'HSSR_JOINED (connected to client\'s RP circuit)',
}
def _format_hs_state(hs_state):
return _FORMAT_HS_STATE.get(hs_state, str(hs_state))
def _format_close_reason(reason):
# Fuck it, these shouldn't show up in normal use anyway.
return str(reason)
def _format_streams(streams):
s_list = []
for stream in streams:
s_list.append(' %s\n' % stream)
return ''.join(s_list)
def _labeled_separator(label):
box = Gtk.Box()
label = Gtk.Label(label) # set_markup?
box.pack_start(Gtk.HSeparator(), True, True, 0)
box.pack_start(label, False, False, 2)
box.pack_start(Gtk.HSeparator(), True, True, 0)
item = Gtk.ImageMenuItem()
item.set_property('child', box)
item.set_sensitive(False)
return item
class PopupMenu(object):
_ctl = None
_menu = None
_status_icon = None
def __init__(self, icon):
self._ctl = icon._ctl
self._status_icon = icon
self._menu = Gtk.Menu()
item = Gtk.MenuItem('Stem Prompt')
item.connect('activate', self._on_prompt)
self._menu.append(item)
item = Gtk.MenuItem('Reload Tor Config')
item.connect('activate', self._on_reload)
self._menu.append(item)
self._menu.append(Gtk.SeparatorMenuItem())
item = Gtk.MenuItem('About')
item.connect('activate', self._on_about)
self._menu.append(item)
self._menu.show_all()
def popup(self, widget, button, time):
self._menu.popup(None, None, _pos, self._status_icon._icon, button, time)
def _on_prompt(self, widget, data=None):
Popen('/usr/bin/urxvt -e python2 -c "from stem.interpreter import main; main()"', shell=True)
def _on_reload(self, widget, data=None):
self._ctl.reload()
def _on_about(self, widget, data=None):
about_dialog = Gtk.AboutDialog()
about_dialog.set_destroy_with_parent(True)
about_dialog.set_program_name('or-applet')
about_dialog.set_copyright('Copyright 2014 Yawning Angel')
about_dialog.set_comments('A Gtk+ Tor System Tray applet.')
about_dialog.set_version('0.0.1')
about_dialog.set_authors(['Yawning Angel'])
about_dialog.set_artists(['Robin Weatherall http://www.robinweatherall.eu'])
about_dialog.run()
about_dialog.destroy()
class ActivateMenu(object):
_ctl = None
_clipboard = None
_menu = None
_status_icon = None
def __init__(self, icon):
self._ctl = icon._ctl
self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self._status_icon = icon
self._menu = Gtk.Menu()
item = Gtk.MenuItem('NEWNYM')
item.set_sensitive(self._ctl.is_newnym_available())
item.connect('activate', self._on_newnym)
self._menu.append(item)
self._menu.append(Gtk.SeparatorMenuItem())
self._build_dynamic_menu()
self._menu.show_all()
def popup(self, widget, button, time):
self._menu.popup(None, None, _pos, self._status_icon._icon, button, time)
def _build_dynamic_menu(self):
circuits = self._ctl.get_circuits()
if circuits is None:
item = Gtk.MenuItem('No circuits established')
item.set_sensitive(False)
self._menu.append(item)
return
streams = self._ctl.get_streams()
for circuit in circuits:
self._build_circuit_menu(circuit, streams)
def _build_circuit_menu(self, circuit, streams):
# Skip displaying internal circuits, unless they are actually hidden
# service circuits in disguise.
if not _filter_circuit(circuit):
return
circ_info = _format_circuit(circuit)
our_auth = ""
if circuit.socks_username:
our_auth = circuit.socks_username
if circuit.socks_password:
our_auth += ':' + circuit.socks_password
our_streams = []
if CircPurpose.HS_CLIENT_INTRO in circuit.purpose or CircPurpose.HS_CLIENT_REND in circuit.purpose:
our_streams.append('[HS Client]: %s.onion' % circuit.rend_query)
elif CircPurpose.HS_SERVICE_INTRO in circuit.purpose:
our_streams.append('[HS Server Intro]: %s.onion' % circuit.rend_query)
elif CircPurpose.HS_SERVICE_REND in circuit.purpose:
our_streams.append('[HS Server Rend]: %s.onion' % circuit.rend_query)
else:
for stream in streams:
if stream.circ_id == circuit.id:
our_streams.append('[%s]: %s' % (stream.id, stream.target))
if not our_streams:
our_streams.append('No streams established')
stream_info = 'Streams:\n%s' % _format_streams(our_streams)
menu = Gtk.Menu()
        if our_auth:
            stream_sep = 'Streams (%s)' % our_auth
        else:
            stream_sep = 'Streams'
menu.append(_labeled_separator(stream_sep))
for s in our_streams:
item = Gtk.MenuItem(s)
menu.append(item)
menu.append(_labeled_separator('Path'))
        for idx, hop in enumerate(circuit.path):
            item_text = '[%d]: %s~%s' % (idx, hop[0], hop[1])
            item = Gtk.MenuItem(item_text)
            menu.append(item)
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.MenuItem('Copy to clipboard')
item.connect('activate', self._on_copy_circuit, circ_info + stream_info)
menu.append(item)
item = Gtk.MenuItem('Close circuit')
if CircPurpose.HS_SERVICE_INTRO not in circuit.purpose and CircPurpose.HS_SERVICE_REND not in circuit.purpose:
item.connect('activate', self._on_close_circuit, circuit.id)
else:
item.set_sensitive(False)
menu.append(item)
item = Gtk.MenuItem('Circuit: ' + circuit.id)
item.set_submenu(menu)
self._menu.append(item)
def _on_newnym(self, widget, data=None):
self._ctl.newnym()
def _on_copy_circuit(self, widget, data=None):
self._clipboard.set_text(data, -1)
def _on_close_circuit(self, widget, data=None):
self._ctl.close_circuit(data)
class OrStatusIcon(object):
_ctl = None
_icon = None
_menu_popup = None
_activate_menu = None
def __init__(self, ctl):
self._ctl = ctl
self._menu_popup = PopupMenu(self)
self._icon = Gtk.StatusIcon()
self._icon.set_from_file(get_leek_icon())
self._icon.connect('activate', self._on_activate)
self._icon.connect('popup-menu', self._menu_popup.popup)
self._icon.set_visible(True)
def set_tooltip_text(self, text):
self._icon.set_tooltip_text(text)
def pos(self, menu, icon):
        return Gtk.StatusIcon.position_menu(menu, icon)
def _on_activate(self, widget, data=None):
        # Keep a reference on self, otherwise Python GCs the menu before it is shown.
self._activate_menu = ActivateMenu(self)
self._activate_menu.popup(self._activate_menu, 1, Gtk.get_current_event_time())
| gpl-3.0 | 4,973,378,940,227,673,000 | 37.523511 | 120 | 0.647652 | false |
deadblue/baidupan_shell | pyasn1/type/univ.py | 185 | 39731 | # ASN.1 "universal" data types
import operator, sys
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def __and__(self, value): return self.clone(self._value & value)
def __rand__(self, value): return self.clone(value & self._value)
def __or__(self, value): return self.clone(self._value | value)
def __ror__(self, value): return self.clone(value | self._value)
def __xor__(self, value): return self.clone(self._value ^ value)
def __rxor__(self, value): return self.clone(value ^ self._value)
def __lshift__(self, value): return self.clone(self._value << value)
def __rshift__(self, value): return self.clone(self._value >> value)
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __sub__(self, value): return self.clone(self._value - value)
def __rsub__(self, value): return self.clone(value - self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self.clone(value * self._value)
def __mod__(self, value): return self.clone(self._value % value)
def __rmod__(self, value): return self.clone(value % self._value)
def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value): return self.clone(pow(value, self._value))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(self._value // value)
def __rdiv__(self, value): return self.clone(value // self._value)
else:
def __truediv__(self, value): return self.clone(self._value / value)
def __rtruediv__(self, value): return self.clone(value / self._value)
def __divmod__(self, value): return self.clone(self._value // value)
def __rdivmod__(self, value): return self.clone(value // self._value)
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self): return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self): return long(self._value)
def __float__(self): return float(self._value)
def __abs__(self): return abs(self._value)
def __index__(self): return int(self._value)
def __lt__(self, value): return self._value < value
def __le__(self, value): return self._value <= value
def __eq__(self, value): return self._value == value
def __ne__(self, value): return self._value != value
def __gt__(self, value): return self._value > value
def __ge__(self, value): return self._value >= value
def prettyIn(self, value):
if not isinstance(value, str):
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
)
r = self.__namedValues.getValue(value)
if r is not None:
return r
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
r = self.__namedValues.getName(value)
return r is None and str(value) or repr(r)
def getNamedValues(self): return self.__namedValues
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
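# A minimal usage sketch of named values (illustrative only; this helper is
# never called and its identifiers are made up):
def _integer_named_values_example():
    error_status = Integer(namedValues=namedval.NamedValues(
        ('noError', 0), ('tooBig', 1)))
    assert int(error_status.clone('tooBig')) == 1  # symbolic name coerced in
    assert error_status.clone(1).prettyPrint() == "'tooBig'"  # ...and out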
class Boolean(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def __str__(self): return str(tuple(self))
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def prettyIn(self, value):
r = []
if not value:
return ()
elif isinstance(value, str):
if value[0] == '\'':
if value[-2:] == '\'B':
for v in value[1:-2]:
if v == '0':
r.append(0)
elif v == '1':
r.append(1)
else:
raise error.PyAsn1Error(
'Non-binary BIT STRING initializer %s' % (v,)
)
return tuple(r)
elif value[-2:] == '\'H':
for v in value[1:-2]:
i = 4
v = int(v, 16)
while i:
i = i - 1
r.append((v>>i)&0x01)
return tuple(r)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % (value,)
)
else:
for i in value.split(','):
j = self.__namedValues.getValue(i)
if j is None:
raise error.PyAsn1Error(
'Unknown bit identifier \'%s\'' % (i,)
)
if j >= len(r):
r.extend([0]*(j-len(r)+1))
r[j] = 1
return tuple(r)
elif isinstance(value, (tuple, list)):
r = tuple(value)
for b in r:
if b and b != 1:
raise error.PyAsn1Error(
'Non-binary BitString initializer \'%s\'' % (r,)
)
return r
elif isinstance(value, BitString):
return tuple(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
def prettyOut(self, value):
return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
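# A short sketch of the initializer notations accepted above (illustrative
# only; this helper is never called and its identifiers are made up):
def _bitstring_notation_example():
    assert BitString("'1010'B") == (1, 0, 1, 0)  # binary string notation
    assert BitString("'A'H") == (1, 0, 1, 0)     # hex string notation
    flags = BitString('urgent,audited', namedValues=namedval.NamedValues(
        ('urgent', 0), ('replied', 1), ('audited', 2)))
    assert tuple(flags) == (1, 0, 1)             # named bits set by position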
class OctetString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
defaultBinValue = defaultHexValue = base.noValue
encoding = 'us-ascii'
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if encoding is None:
self._encoding = self.encoding
else:
self._encoding = encoding
if binValue is not None:
value = self.fromBinaryString(binValue)
if hexValue is not None:
value = self.fromHexString(hexValue)
if value is None or value is base.noValue:
value = self.defaultHexValue
if value is None or value is base.noValue:
value = self.defaultBinValue
self.__intValue = None
base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if value is None and tagSet is None and subtypeSpec is None and \
encoding is None and binValue is None and hexValue is None:
return self
if value is None and binValue is None and hexValue is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if encoding is None:
encoding = self._encoding
return self.__class__(
value, tagSet, subtypeSpec, encoding, binValue, hexValue
)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, (tuple, list)):
try:
return ''.join([ chr(x) for x in value ])
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
return str(value)
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, OctetString):
return value.asOctets()
elif isinstance(value, (tuple, list, map)):
try:
return bytes(value)
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
try:
return str(value).encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
def fromBinaryString(self, value):
bitNo = 8; byte = 0; r = ()
for v in value:
if bitNo:
bitNo = bitNo - 1
else:
bitNo = 7
r = r + (byte,)
byte = 0
if v == '0':
v = 0
elif v == '1':
v = 1
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte = byte | (v << bitNo)
return octets.ints2octs(r + (byte,))
def fromHexString(self, value):
r = p = ()
for v in value:
if p:
r = r + (int(p+v, 16),)
p = ()
else:
p = v
if p:
r = r + (int(p+'0', 16),)
return octets.ints2octs(r)
def prettyOut(self, value):
if sys.version_info[0] <= 2:
numbers = tuple([ ord(x) for x in value ])
else:
numbers = tuple(value)
if [ x for x in numbers if x < 32 or x > 126 ]:
return '0x' + ''.join([ '%.2x' % x for x in numbers ])
else:
return str(value)
def __repr__(self):
if self._value is base.noValue:
return self.__class__.__name__ + '()'
if [ x for x in self.asNumbers() if x < 32 or x > 126 ]:
return self.__class__.__name__ + '(hexValue=\'' + ''.join([ '%.2x' % x for x in self.asNumbers() ])+'\')'
else:
return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
if sys.version_info[0] <= 2:
def __str__(self): return str(self._value)
def __unicode__(self):
return self._value.decode(self._encoding, 'ignore')
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple([ ord(x) for x in self._value ])
return self.__intValue
else:
def __str__(self): return self._value.decode(self._encoding, 'ignore')
def __bytes__(self): return self._value
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple(self._value)
return self.__intValue
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
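# A small sketch of the binValue/hexValue constructors (illustrative only;
# this helper is never called):
def _octetstring_initializer_example():
    assert OctetString(binValue='10110011').asNumbers() == (179,)
    assert OctetString(hexValue='b3').asNumbers() == (179,)  # same octet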
class Null(OctetString):
defaultValue = ''.encode() # This is tightly constrained
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
if sys.version_info[0] <= 2:
intTypes = (int, long)
else:
intTypes = int
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def __add__(self, other): return self.clone(self._value + other)
def __radd__(self, other): return self.clone(other + self._value)
def asTuple(self): return self._value
# Sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(
operator.getitem(self._value, i)
)
else:
return self._value[i]
def __str__(self): return self.prettyPrint()
def index(self, suboid): return self._value.index(suboid)
def isPrefixOf(self, value):
"""Returns true if argument OID resides deeper in the OID tree"""
l = len(self)
if l <= len(value):
if self._value[:l] == value[:l]:
return 1
return 0
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
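# A brief sketch of dotted-form handling (illustrative only; this helper is
# never called):
def _object_identifier_example():
    internet = ObjectIdentifier('1.3.6.1')
    assert str(internet) == '1.3.6.1'             # dotted round-trip
    assert internet.isPrefixOf((1, 3, 6, 1, 2))   # deeper in the OID tree
    assert tuple(internet + (2,)) == (1, 3, 6, 1, 2)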
class Real(base.AbstractSimpleAsn1Item):
try:
_plusInf = float('inf')
_minusInf = float('-inf')
_inf = (_plusInf, _minusInf)
except ValueError:
# Infinity support is platform and Python dependent
_plusInf = _minusInf = None
_inf = ()
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
    def __normalizeBase10(self, value):
        m, b, e = value
        while m and m % 10 == 0:
            m = m // 10  # floor division keeps the mantissa integral on Python 3
            e = e + 1
        return m, b, e
def prettyIn(self, value):
if isinstance(value, tuple) and len(value) == 3:
for d in value:
if not isinstance(d, intTypes):
raise error.PyAsn1Error(
'Lame Real value syntax: %s' % (value,)
)
if value[1] not in (2, 10):
raise error.PyAsn1Error(
'Prohibited base for Real value: %s' % (value[1],)
)
if value[1] == 10:
value = self.__normalizeBase10(value)
return value
elif isinstance(value, intTypes):
return self.__normalizeBase10((value, 10, 0))
elif isinstance(value, float):
if self._inf and value in self._inf:
return value
else:
e = 0
while int(value) != value:
value = value * 10
e = e - 1
return self.__normalizeBase10((int(value), 10, e))
elif isinstance(value, Real):
return tuple(value)
elif isinstance(value, str): # handle infinite literal
try:
return float(value)
except ValueError:
pass
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
def prettyOut(self, value):
if value in self._inf:
return '\'%s\'' % value
else:
return str(value)
def isPlusInfinity(self): return self._value == self._plusInf
def isMinusInfinity(self): return self._value == self._minusInf
def isInfinity(self): return self._value in self._inf
def __str__(self): return str(float(self))
def __add__(self, value): return self.clone(float(self) + value)
def __radd__(self, value): return self + value
def __mul__(self, value): return self.clone(float(self) * value)
def __rmul__(self, value): return self * value
def __sub__(self, value): return self.clone(float(self) - value)
def __rsub__(self, value): return self.clone(value - float(self))
def __mod__(self, value): return self.clone(float(self) % value)
def __rmod__(self, value): return self.clone(value % float(self))
def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
def __rpow__(self, value): return self.clone(pow(value, float(self)))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(float(self) / value)
def __rdiv__(self, value): return self.clone(value / float(self))
else:
def __truediv__(self, value): return self.clone(float(self) / value)
def __rtruediv__(self, value): return self.clone(value / float(self))
def __divmod__(self, value): return self.clone(float(self) // value)
def __rdivmod__(self, value): return self.clone(value // float(self))
def __int__(self): return int(float(self))
if sys.version_info[0] <= 2:
def __long__(self): return long(float(self))
def __float__(self):
if self._value in self._inf:
return self._value
else:
return float(
self._value[0] * pow(self._value[1], self._value[2])
)
def __abs__(self): return abs(float(self))
def __lt__(self, value): return float(self) < value
def __le__(self, value): return float(self) <= value
def __eq__(self, value): return float(self) == value
def __ne__(self, value): return float(self) != value
def __gt__(self, value): return float(self) > value
def __ge__(self, value): return float(self) >= value
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(float(self))
else:
def __bool__(self): return bool(float(self))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __getitem__(self, idx):
if self._value in self._inf:
raise error.PyAsn1Error('Invalid infinite value operation')
else:
return self._value[idx]
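# A sketch of the (mantissa, base, exponent) representation (illustrative
# only; this helper is never called):
def _real_representation_example():
    quarter = Real(0.25)  # floats are decomposed into base-10 triples
    assert (quarter[0], quarter[1], quarter[2]) == (25, 10, -2)
    assert float(quarter) == 0.25          # 25 * 10 ** -2
    assert Real(120) == Real((12, 10, 1))  # trailing zeros normalized away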
class Enumerated(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
)
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
componentType = None
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 1
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if self._componentType is not None and \
not self._componentType.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %s' % (value,))
def getComponentByPosition(self, idx): return self._componentValues[idx]
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
self._componentValues[idx] = self._componentType.clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
value = self._componentType.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentType is not None:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getComponentTagMap(self):
if self._componentType is not None:
return self._componentType.getTagMap()
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
r = r + ' '*scope
if self._componentValues[idx] is None:
r = r + '<empty>'
else:
r = r + self._componentValues[idx].prettyPrint(scope)
return r
class SequenceOf(SetOf):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
componentType = namedtype.NamedTypes()
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
base.AbstractConstructedAsn1Item.__init__(
self, componentType, tagSet, subtypeSpec, sizeSpec
)
if self._componentType is None:
self._componentTypeLen = 0
else:
self._componentTypeLen = len(self._componentType)
def __getitem__(self, idx):
if isinstance(idx, str):
return self.getComponentByName(idx)
else:
return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
def __setitem__(self, idx, value):
if isinstance(idx, str):
self.setComponentByName(idx, value)
else:
base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if idx >= self._componentTypeLen:
raise error.PyAsn1Error(
'Component type error out of range'
)
t = self._componentType[idx].getType()
if not t.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
def getComponentByName(self, name):
return self.getComponentByPosition(
self._componentType.getPositionByName(name)
)
def setComponentByName(self, name, value=None, verifyConstraints=True):
return self.setComponentByPosition(
self._componentType.getPositionByName(name), value,
verifyConstraints
)
def getComponentByPosition(self, idx):
try:
return self._componentValues[idx]
except IndexError:
if idx < self._componentTypeLen:
return
raise
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
t = self._componentType.getTypeByPosition(idx)
if isinstance(t, base.AbstractSimpleAsn1Item):
value = t.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self._componentType.getNameByPosition(idx)
def getDefaultComponentByPosition(self, idx):
if self._componentTypeLen and self._componentType[idx].isDefaulted:
return self._componentType[idx].getType()
def getComponentType(self):
if self._componentTypeLen:
return self._componentType
def setDefaultComponents(self):
if self._componentTypeLen == self._componentValuesSet:
return
idx = self._componentTypeLen
while idx:
idx = idx - 1
if self._componentType[idx].isDefaulted:
if self.getComponentByPosition(idx) is None:
self.setComponentByPosition(idx)
elif not self._componentType[idx].isOptional:
if self.getComponentByPosition(idx) is None:
raise error.PyAsn1Error(
'Uninitialized component #%s at %r' % (idx, self)
)
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is not None:
r = r + ' '*scope
componentType = self.getComponentType()
if componentType is None:
r = r + '<no-name>'
else:
r = r + componentType.getNameByPosition(idx)
r = '%s=%s\n' % (
r, self._componentValues[idx].prettyPrint(scope)
)
return r
class Sequence(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 3
def getComponentTagMapNearPosition(self, idx):
if self._componentType:
return self._componentType.getTagMapNearPosition(idx)
def getComponentPositionNearType(self, tagSet, idx):
if self._componentType:
return self._componentType.getPositionNearType(tagSet, idx)
else:
return idx
class Set(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 4
def getComponent(self, innerFlag=0): return self
def getComponentByType(self, tagSet, innerFlag=0):
c = self.getComponentByPosition(
self._componentType.getPositionByType(tagSet)
)
if innerFlag and isinstance(c, Set):
# get inner component by inner tagSet
return c.getComponent(1)
else:
# get outer component by inner tagSet
return c
def setComponentByType(self, tagSet, value=None, innerFlag=0,
verifyConstraints=True):
idx = self._componentType.getPositionByType(tagSet)
t = self._componentType.getTypeByPosition(idx)
if innerFlag: # set inner component by inner tagSet
if t.getTagSet():
return self.setComponentByPosition(
idx, value, verifyConstraints
)
else:
t = self.setComponentByPosition(idx).getComponentByPosition(idx)
return t.setComponentByType(
tagSet, value, innerFlag, verifyConstraints
)
else: # set outer component by inner tagSet
return self.setComponentByPosition(
idx, value, verifyConstraints
)
def getComponentTagMap(self):
if self._componentType:
return self._componentType.getTagMap(True)
def getComponentPositionByType(self, tagSet):
if self._componentType:
return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
def __eq__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] == other
return NotImplemented
def __ne__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] != other
return NotImplemented
def __lt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] < other
return NotImplemented
def __le__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] <= other
return NotImplemented
def __gt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] > other
return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
def __len__(self): return self._currentIdx is not None and 1 or 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
class Any(OctetString):
tagSet = baseTagSet = tag.TagSet() # untagged
typeId = 6
def getTagMap(self):
return tagmap.TagMap(
{ self.getTagSet(): self },
{ eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
self
)
# XXX
# coercion rules?
| gpl-2.0 | 1,314,233,685,782,889,200 | 37.129559 | 117 | 0.549974 | false |
liyu1990/sklearn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([])
plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause | -1,486,551,899,741,850,000 | 27.92 | 72 | 0.574689 | false |
barthisrael/OmniDB | OmniDB/OmniDB_app/include/paramiko/kex_gex.py | 7 | 10302 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
generator "g" are provided by the server. A bit more work is required on the
client side, and a **lot** more on the server side.
"""
import os
from hashlib import sha1, sha256
from paramiko import util
from paramiko.common import DEBUG
from paramiko.message import Message
from paramiko.py3compat import byte_chr, byte_ord, byte_mask
from paramiko.ssh_exception import SSHException
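# Client-side message flow, as implemented below:
#   1. send KEXDH_GEX_REQUEST with (min, preferred, max) bit sizes
#   2. receive KEXDH_GEX_GROUP carrying (p, g); pick a random secret x
#   3. send KEXDH_GEX_INIT with e = g^x mod p
#   4. receive KEXDH_GEX_REPLY with (host key, f, sig); derive K = f^x mod p
#      and verify sig over the exchange hash H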
(
_MSG_KEXDH_GEX_REQUEST_OLD,
_MSG_KEXDH_GEX_GROUP,
_MSG_KEXDH_GEX_INIT,
_MSG_KEXDH_GEX_REPLY,
_MSG_KEXDH_GEX_REQUEST,
) = range(30, 35)
(
c_MSG_KEXDH_GEX_REQUEST_OLD,
c_MSG_KEXDH_GEX_GROUP,
c_MSG_KEXDH_GEX_INIT,
c_MSG_KEXDH_GEX_REPLY,
c_MSG_KEXDH_GEX_REQUEST,
) = [byte_chr(c) for c in range(30, 35)]
class KexGex(object):
name = "diffie-hellman-group-exchange-sha1"
min_bits = 1024
max_bits = 8192
preferred_bits = 2048
hash_algo = sha1
def __init__(self, transport):
self.transport = transport
self.p = None
self.q = None
self.g = None
self.x = None
self.e = None
self.f = None
self.old_style = False
def start_kex(self, _test_old_style=False):
if self.transport.server_mode:
self.transport._expect_packet(
_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD
)
return
# request a bit range: we accept (min_bits) to (max_bits), but prefer
# (preferred_bits). according to the spec, we shouldn't pull the
# minimum up above 1024.
m = Message()
if _test_old_style:
# only used for unit tests: we shouldn't ever send this
m.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
m.add_int(self.preferred_bits)
self.old_style = True
else:
m.add_byte(c_MSG_KEXDH_GEX_REQUEST)
m.add_int(self.min_bits)
m.add_int(self.preferred_bits)
m.add_int(self.max_bits)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)
def parse_next(self, ptype, m):
if ptype == _MSG_KEXDH_GEX_REQUEST:
return self._parse_kexdh_gex_request(m)
elif ptype == _MSG_KEXDH_GEX_GROUP:
return self._parse_kexdh_gex_group(m)
elif ptype == _MSG_KEXDH_GEX_INIT:
return self._parse_kexdh_gex_init(m)
elif ptype == _MSG_KEXDH_GEX_REPLY:
return self._parse_kexdh_gex_reply(m)
elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
return self._parse_kexdh_gex_request_old(m)
msg = "KexGex {} asked to handle packet type {:d}"
raise SSHException(msg.format(self.name, ptype))
# ...internals...
    def _generate_x(self):
        # generate an "x" (1 < x < (p-1)/2).
        q = (self.p - 1) // 2
        qnorm = util.deflate_long(q, 0)
        qhbyte = byte_ord(qnorm[0])
        byte_count = len(qnorm)
        # shrink the mask for the leading byte until it matches q's bit
        # length, so random candidates are never wildly larger than q
        qmask = 0xff
        while not (qhbyte & 0x80):
            qhbyte <<= 1
            qmask >>= 1
        # rejection-sample until a candidate lands strictly inside (1, q)
        while True:
            x_bytes = os.urandom(byte_count)
            x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
            x = util.inflate_long(x_bytes, 1)
            if (x > 1) and (x < q):
                break
        self.x = x
def _parse_kexdh_gex_request(self, m):
minbits = m.get_int()
preferredbits = m.get_int()
maxbits = m.get_int()
# smoosh the user's preferred size into our own limits
if preferredbits > self.max_bits:
preferredbits = self.max_bits
if preferredbits < self.min_bits:
preferredbits = self.min_bits
# fix min/max if they're inconsistent. technically, we could just pout
# and hang up, but there's no harm in giving them the benefit of the
# doubt and just picking a bitsize for them.
if minbits > preferredbits:
minbits = preferredbits
if maxbits < preferredbits:
maxbits = preferredbits
# now save a copy
self.min_bits = minbits
self.preferred_bits = preferredbits
self.max_bits = maxbits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG,
"Picking p ({} <= {} <= {} bits)".format(
minbits, preferredbits, maxbits
),
)
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
def _parse_kexdh_gex_request_old(self, m):
# same as above, but without min_bits or max_bits (used by older
# clients like putty)
self.preferred_bits = m.get_int()
# smoosh the user's preferred size into our own limits
if self.preferred_bits > self.max_bits:
self.preferred_bits = self.max_bits
if self.preferred_bits < self.min_bits:
self.preferred_bits = self.min_bits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG, "Picking p (~ {} bits)".format(self.preferred_bits)
)
self.g, self.p = pack.get_modulus(
self.min_bits, self.preferred_bits, self.max_bits
)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
self.old_style = True
def _parse_kexdh_gex_group(self, m):
self.p = m.get_mpint()
self.g = m.get_mpint()
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
raise SSHException(
"Server-generated gex p (don't ask) is out of range "
"({} bits)".format(bitlen)
)
self.transport._log(DEBUG, "Got server p ({} bits)".format(bitlen))
self._generate_x()
# now compute e = g^x mod p
self.e = pow(self.g, self.x, self.p)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)
def _parse_kexdh_gex_init(self, m):
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.p - 1):
raise SSHException('Client kex "e" is out of range')
self._generate_x()
self.f = pow(self.g, self.x, self.p)
K = pow(self.e, self.x, self.p)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(H)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
def _parse_kexdh_gex_reply(self, m):
host_key = m.get_string()
self.f = m.get_mpint()
sig = m.get_string()
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
host_key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
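    # A toy illustration of the Diffie-Hellman symmetry relied on above
    # (hedged sketch with tiny made-up numbers; real exchanges use the
    # negotiated large prime p and generator g, and this is never called):
    @staticmethod
    def _dh_symmetry_sketch(p=23, g=5, x=6, y=15):
        e = pow(g, x, p)  # what the client sends
        f = pow(g, y, p)  # what the server sends
        # both sides derive the same shared secret K = g^(x*y) mod p
        return pow(f, x, p) == pow(e, y, p)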
class KexGexSHA256(KexGex):
name = "diffie-hellman-group-exchange-sha256"
hash_algo = sha256
| mit | -3,105,219,090,994,518,500 | 34.89547 | 95 | 0.575519 | false |
numerigraphe/odoo | addons/project/report/project_report.py | 279 | 5789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_project_task_user(osv.osv):
_name = "report.project.task.user"
_description = "Tasks by user and project"
_auto = False
_columns = {
'name': fields.char('Task Summary', readonly=True),
'user_id': fields.many2one('res.users', 'Assigned To', readonly=True),
'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
'date_start': fields.datetime('Assignation Date', readonly=True),
'no_of_days': fields.integer('# of Days', size=128, readonly=True),
'date_end': fields.datetime('Ending Date', readonly=True),
'date_deadline': fields.date('Deadline', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'project_id': fields.many2one('project.project', 'Project', readonly=True),
'hours_planned': fields.float('Planned Hours', readonly=True),
'hours_effective': fields.float('Effective Hours', readonly=True),
'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
'remaining_hours': fields.float('Remaining Hours', readonly=True),
'progress': fields.float('Progress', readonly=True, group_operator='avg'),
'total_hours': fields.float('Total Hours', readonly=True),
'closing_days': fields.float('Days to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the task"),
'opening_days': fields.float('Days to Assign', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to Open the task"),
'delay_endings_days': fields.float('Overpassed Deadline', digits=(16,2), readonly=True),
'nbr': fields.integer('# of Tasks', readonly=True), # TDE FIXME master: rename into nbr_tasks
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')],
string='Priority', size=1, readonly=True),
'state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')],'Status', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
'stage_id': fields.many2one('project.task.type', 'Stage'),
}
_order = 'name desc, project_id'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'report_project_task_user')
cr.execute("""
CREATE view report_project_task_user as
SELECT
(select 1 ) AS nbr,
t.id as id,
t.date_start as date_start,
t.date_end as date_end,
t.date_last_stage_update as date_last_stage_update,
t.date_deadline as date_deadline,
abs((extract('epoch' from (t.write_date-t.date_start)))/(3600*24)) as no_of_days,
t.user_id,
t.reviewer_id,
progress as progress,
t.project_id,
t.effective_hours as hours_effective,
t.priority,
t.name as name,
t.company_id,
t.partner_id,
t.stage_id as stage_id,
t.kanban_state as state,
remaining_hours as remaining_hours,
total_hours as total_hours,
t.delay_hours as hours_delay,
planned_hours as hours_planned,
(extract('epoch' from (t.write_date-t.create_date)))/(3600*24) as closing_days,
(extract('epoch' from (t.date_start-t.create_date)))/(3600*24) as opening_days,
(extract('epoch' from (t.date_deadline-(now() at time zone 'UTC'))))/(3600*24) as delay_endings_days
FROM project_task t
WHERE t.active = 'true'
GROUP BY
t.id,
remaining_hours,
t.effective_hours,
progress,
total_hours,
planned_hours,
hours_delay,
create_date,
write_date,
date_start,
date_end,
date_deadline,
date_last_stage_update,
t.user_id,
t.reviewer_id,
t.project_id,
t.priority,
name,
t.company_id,
t.partner_id,
stage_id
""")
| agpl-3.0 | -8,095,639,358,768,804,000 | 49.33913 | 143 | 0.534462 | false |
liavkoren/djangoDev | django/db/backends/schema.py | 2 | 38689 | import hashlib
import operator
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
from django.db.models.fields.related import ManyToManyField
from django.db.transaction import atomic
from django.utils.encoding import force_bytes
from django.utils.log import getLogger
from django.utils.six.moves import reduce
from django.utils.six import callable
logger = getLogger('django.db.backends.schema')
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
    This class should be used by creating an instance for each set of schema
    changes (e.g. a syncdb run, a migration file) and using it as a context
    manager. This is necessary to allow things like circular foreign key
    references - FKs will only be created once the deferred SQL runs on exit.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_create_table_unique = "UNIQUE (%(columns)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ";")
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
default_value = self.effective_default(field)
if include_default and default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null:
sql += " NULL"
else:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError('subclasses of BaseDatabaseSchemaEditor for backends which have requires_literal_defaults must provide a prepare_default() method')
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
default = ""
else:
default = None
# If it's a callable, call it
if callable(default):
default = default()
return default
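    # Hedged examples of the resulting value (the model fields below are
    # hypothetical):
    #   CharField(blank=True)            -> ''    (empty string sentinel)
    #   IntegerField(null=True)          -> None  (no effective default)
    #   IntegerField(default=lambda: 42) -> 42    (callables are invoked)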
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# Indexes
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# FK
if field.rel:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(
self.sql_create_fk % {
"name": self._create_index_name(model, [field.column], suffix="_fk_%s_%s" % (to_table, to_column)),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers
for fields in model._meta.unique_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
column_sqls.append(self.sql_create_table_unique % {
"columns": ", ".join(self.quote_name(column) for column in columns),
})
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
self.execute(sql, params)
# Add any index_togethers
for fields in model._meta.index_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.create_model(field.rel.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, columns, unique=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_uniq"),
"columns": ", ".join(self.quote_name(column) for column in columns),
})
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, list(columns), index=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created indexes
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# Add any FK constraints later
if field.rel and self.connection.features.supports_foreign_keys:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
self.deferred_sql.append(
self.sql_create_fk % {
"name": '%s_refs_%s_%x' % (
field.column,
to_column,
abs(hash((model._meta.db_table, to_table)))
),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Get the column's definition
definition, params = self.column_sql(model, field)
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
        Requires a copy of the old field as well, so that only the changes
        that are actually required are performed.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None or new_type is None:
raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
old_field,
new_field,
))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Removed an index?
if old_field.db_index and not new_field.db_index and not old_field.unique and not (not new_field.unique and old_field.unique):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
if strict and len(index_names) != 1:
raise ValueError("Found wrong number (%s) of indexes for %s.%s" % (
len(index_names),
model._meta.db_table,
old_field.column,
))
for index_name in index_names:
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": index_name,
}
)
# Drop any FK constraints, we'll remake them later
if old_field.rel:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
self.execute(
self.sql_delete_fk % {
"table": self.quote_name(model._meta.db_table),
"name": fk_name,
}
)
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.get_all_related_objects():
rel_fk_names = self._constraint_names(rel.model, [rel.field.column], foreign_key=True)
for fk_name in rel_fk_names:
self.execute(
self.sql_delete_fk % {
"table": self.quote_name(rel.model._meta.db_table),
"name": fk_name,
}
)
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_check % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
}
)
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self.sql_rename_column % {
"table": self.quote_name(model._meta.db_table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
})
# Next, start accumulating actions to do
actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(model._meta.db_table, new_field.column, new_type)
actions.append(fragment)
post_actions.extend(other_actions)
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
if old_default != new_default:
if new_default is None:
actions.append((
self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
},
[],
))
else:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if new_field.null:
actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
if actions:
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), reduce(operator.add, params))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if not old_field.unique and new_field.unique:
self.execute(
self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
}
)
# Added an index?
if not old_field.db_index and new_field.db_index and not new_field.unique and not (not old_field.unique and new_field.unique):
self.execute(
self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
"extra": "",
}
)
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(new_field.model._meta.get_all_related_objects())
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_pk % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_pk"),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(new_field.model._meta.get_all_related_objects())
# Handle our type alters on the other end of rels from the PK stuff above
for rel in rels_to_update:
rel_db_params = rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
self.execute(
self.sql_alter_column % {
"table": self.quote_name(rel.model._meta.db_table),
"changes": self.sql_alter_column_type % {
"column": self.quote_name(rel.field.column),
"type": rel_type,
}
}
)
# Does it have a foreign key?
if new_field.rel:
self.execute(
self.sql_create_fk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_fk"),
"column": self.quote_name(new_field.column),
"to_table": self.quote_name(new_field.rel.to._meta.db_table),
"to_column": self.quote_name(new_field.rel.get_related_field().column),
}
)
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.get_all_related_objects():
self.execute(
self.sql_create_fk % {
"table": self.quote_name(rel.model._meta.db_table),
"name": self._create_index_name(rel.model, [rel.field.column], suffix="_fk"),
"column": self.quote_name(rel.field.column),
"to_table": self.quote_name(model._meta.db_table),
"to_column": self.quote_name(new_field.column),
}
)
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_check"),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, column, type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table:
self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.rel.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0],
new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0],
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
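# Usage sketch (assuming a configured Django 1.7-era project; ``Author``
# and ``new_field`` are hypothetical). The editor is normally obtained
# from the connection rather than instantiated directly:
#
#     from django.db import connection
#
#     with connection.schema_editor() as editor:
#         editor.create_model(Author)          # CREATE TABLE; FKs/indexes deferred
#         editor.add_field(Author, new_field)  # ALTER TABLE ... ADD COLUMN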
| bsd-3-clause | -288,901,835,067,738,000 | 45.782346 | 193 | 0.549303 | false |
tvenkat/askbot-devel | askbot/management/commands/add_admin.py | 13 | 1434 | from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save
import sys
class Command(NoArgsCommand):
def get_user(self, uid_str):
try:
uid = int(uid_str)
return User.objects.get(id=uid)
except User.DoesNotExist:
print 'sorry there is no user with id=%d' % uid
sys.exit(1)
except ValueError:
print 'user id must be integer, have %s' % uid_str
sys.exit(1)
def parse_arguments(self, arguments):
if len(arguments) != 1:
            print 'argument for this command is <user_id>'
sys.exit(1)
self.user = self.get_user(arguments[0])
def confirm_action(self):
u = self.user
print ''
prompt = 'Do you really wish to make user (id=%d, name=%s) a site administrator? yes/no: ' \
% (u.id, u.username)
        answer = raw_input(prompt)
        if answer != 'yes':
print 'action canceled'
sys.exit(1)
def remove_signals(self):
pre_save.receivers = []
post_save.receivers = []
def handle(self, *arguments, **options):
#destroy pre_save and post_save signals
self.parse_arguments(arguments)
self.confirm_action()
self.remove_signals()
self.user.set_admin_status()
self.user.save()
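# Example invocation (a sketch; assumes the app providing this command is
# listed in INSTALLED_APPS and that a user with id 42 exists):
#
#     python manage.py add_admin 42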
| gpl-3.0 | 2,222,627,754,285,607,700 | 30.866667 | 100 | 0.580195 | false |
cmusatyalab/django-s3 | django_s3/forms.py | 1 | 1062 | #
# Copyright (C) 2012 Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from django import forms
class UploadStartForm(forms.Form):
blob = forms.IntegerField()
class UploadForm(forms.Form):
blob = forms.IntegerField()
token = forms.CharField()
file = forms.FileField()
resumableChunkNumber = forms.IntegerField()
class UploadFinishForm(forms.Form):
blob = forms.IntegerField()
token = forms.CharField()
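# Minimal usage sketch (hypothetical view code; field names follow the
# form definitions above):
#
#     form = UploadForm(request.POST, request.FILES)
#     if form.is_valid():
#         blob_id = form.cleaned_data['blob']
#         chunk = form.cleaned_data['resumableChunkNumber']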
| gpl-2.0 | -134,369,736,689,553,500 | 30.235294 | 67 | 0.747646 | false |
rigmar/idapython | examples/ex_patch.py | 18 | 1139 | # -------------------------------------------------------------------------
# This is an example illustrating how to visit all patched bytes in Python
# (c) Hex-Rays
import idaapi
# -------------------------------------------------------------------------
class patched_bytes_visitor(object):
def __init__(self):
self.skip = 0
self.patch = 0
def __call__(self, ea, fpos, o, v, cnt=()):
if fpos == -1:
self.skip += 1
print(" ea: %x o: %x v: %x...skipped" % (ea, fpos, o, v))
else:
self.patch += 1
print(" ea: %x fpos: %x o: %x v: %x" % (ea, fpos, o, v))
return 0
# -------------------------------------------------------------------------
def main():
print("Visiting all patched bytes:")
v = patched_bytes_visitor()
r = idaapi.visit_patched_bytes(0, idaapi.BADADDR, v)
if r != 0:
print("visit_patched_bytes() returned %d" % r)
else:
print("Patched: %d Skipped: %d" % (v.patch, v.skip))
# -------------------------------------------------------------------------
if __name__ == '__main__':
main() | bsd-3-clause | 8,492,315,894,779,684,000 | 30.666667 | 75 | 0.378402 | false |
cirrax/openstack-nagios-plugins | openstacknagios/nova/Services.py | 1 | 4298 | #
# Copyright (C) 2014 Cirrax GmbH http://www.cirrax.com
# Benedikt Trefzer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Nagios/Icinga plugin to check running nova services.
This corresponds to the output of 'nova service-list'.
"""
import openstacknagios.openstacknagios as osnag
from novaclient.client import Client
class NovaServices(osnag.Resource):
"""
Determines the status of the nova services.
"""
def __init__(self, binary=None, host=None, args=None):
self.binary = binary
self.host = host
self.openstack = self.get_openstack_vars(args=args)
osnag.Resource.__init__(self)
def probe(self):
try:
nova=Client('2',
session = self.get_session(),
cacert = self.openstack['cacert'],
insecure = self.openstack['insecure'])
except Exception as e:
self.exit_error(str(e))
try:
result=nova.services.list(host=self.host,binary=self.binary)
except Exception as e:
self.exit_error(str(e))
stati=dict(up=0, disabled=0, down=0, total=0)
for agent in result:
stati['total'] += 1
if agent.status == 'enabled' and agent.state =='up':
stati['up'] += 1
elif agent.status == 'disabled':
stati['disabled'] += 1
else:
stati['down'] += 1
for r in stati.keys():
yield osnag.Metric(r, stati[r], min=0)
@osnag.guarded
def main():
argp = osnag.ArgumentParser(description=__doc__)
argp.add_argument('-w', '--warn', metavar='RANGE', default='0:',
help='return warning if number of up agents is outside RANGE (default: 0:, never warn)')
argp.add_argument('-c', '--critical', metavar='RANGE', default='0:',
                      help='return critical if number of up agents is outside RANGE (default: 0:, never critical)')
argp.add_argument('--warn_disabled', metavar='RANGE', default='@1:',
                      help='return warning if number of disabled agents is outside RANGE (default: @1:, warn if any disabled agents)')
argp.add_argument('--critical_disabled', metavar='RANGE', default='0:',
                      help='return critical if number of disabled agents is outside RANGE (default: 0:, never critical)')
argp.add_argument( '--warn_down', metavar='RANGE', default='0:',
help='return warning if number of down agents is outside RANGE (default: 0:, never warn)')
argp.add_argument( '--critical_down', metavar='RANGE', default='0',
                      help='return critical if number of down agents is outside RANGE (default: 0, always critical if any are down)')
argp.add_argument('--binary',
dest='binary',
default=None,
help='filter agent binary')
argp.add_argument('--host',
dest='host',
default=None,
help='filter hostname')
args = argp.parse_args()
check = osnag.Check(
NovaServices(args=args, host=args.host, binary=args.binary),
osnag.ScalarContext('up', args.warn, args.critical),
osnag.ScalarContext('disabled', args.warn_disabled, args.critical_disabled),
osnag.ScalarContext('down', args.warn_down, args.critical_down),
osnag.ScalarContext('total', '0:', '@0'),
osnag.Summary(show=['up','disabled','down','total'])
)
check.main(verbose=args.verbose, timeout=args.timeout)
if __name__ == '__main__':
main()
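# Example invocation (a sketch; the executable name and the auth options
# provided by osnag.ArgumentParser are assumptions here):
#
#     check_nova-services --os-auth-url https://keystone:5000/v3 \
#         --host compute01 --warn 2: --critical 1: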
| gpl-3.0 | 5,133,699,961,189,543,000 | 37.375 | 133 | 0.60121 | false |
studio666/gnuradio | grc/base/Connection.py | 17 | 5254 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
from . import odict
class Connection(Element):
def __init__(self, flow_graph, porta, portb):
"""
Make a new connection given the parent and 2 ports.
Args:
flow_graph: the parent of this element
porta: a port (any direction)
portb: a port (any direction)
@throws Error cannot make connection
Returns:
a new connection
"""
Element.__init__(self, flow_graph)
source = sink = None
#separate the source and sink
for port in (porta, portb):
if port.is_source(): source = port
if port.is_sink(): sink = port
if not source: raise ValueError('Connection could not isolate source')
if not sink: raise ValueError('Connection could not isolate sink')
busses = len(filter(lambda a: a.get_type() == 'bus', [source, sink]))%2
        if not busses == 0: raise ValueError('bus ports must connect to bus ports')
if not len(source.get_associated_ports()) == len(sink.get_associated_ports()):
            raise ValueError('port connections must have same cardinality')
#ensure that this connection (source -> sink) is unique
for connection in self.get_parent().get_connections():
if connection.get_source() is source and connection.get_sink() is sink:
raise LookupError('This connection between source and sink is not unique.')
self._source = source
self._sink = sink
if source.get_type() == 'bus':
            sources = source.get_associated_ports()
            sinks = sink.get_associated_ports()
for i in range(len(sources)):
try:
flow_graph.connect(sources[i], sinks[i]);
except:
pass
def __str__(self):
return 'Connection (\n\t%s\n\t\t%s\n\t%s\n\t\t%s\n)'%(
self.get_source().get_parent(),
self.get_source(),
self.get_sink().get_parent(),
self.get_sink(),
)
def is_connection(self): return True
def validate(self):
"""
Validate the connections.
The ports must match in type.
"""
Element.validate(self)
platform = self.get_parent().get_parent()
source_domain = self.get_source().get_domain()
sink_domain = self.get_sink().get_domain()
if (source_domain, sink_domain) not in platform.get_connection_templates():
self.add_error_message('No connection known for domains "%s", "%s"'
% (source_domain, sink_domain))
too_many_other_sinks = (
source_domain in platform.get_domains() and
not platform.get_domain(key=source_domain)['multiple_sinks'] and
len(self.get_source().get_enabled_connections()) > 1
)
too_many_other_sources = (
sink_domain in platform.get_domains() and
not platform.get_domain(key=sink_domain)['multiple_sources'] and
len(self.get_sink().get_enabled_connections()) > 1
)
if too_many_other_sinks:
self.add_error_message(
'Domain "%s" can have only one downstream block' % source_domain)
if too_many_other_sources:
self.add_error_message(
'Domain "%s" can have only one upstream block' % sink_domain)
def get_enabled(self):
"""
Get the enabled state of this connection.
Returns:
true if source and sink blocks are enabled
"""
return self.get_source().get_parent().get_enabled() and \
self.get_sink().get_parent().get_enabled()
#############################
# Access Ports
#############################
def get_sink(self): return self._sink
def get_source(self): return self._source
##############################################
## Import/Export Methods
##############################################
def export_data(self):
"""
Export this connection's info.
Returns:
a nested data odict
"""
n = odict()
n['source_block_id'] = self.get_source().get_parent().get_id()
n['sink_block_id'] = self.get_sink().get_parent().get_id()
n['source_key'] = self.get_source().get_key()
n['sink_key'] = self.get_sink().get_key()
return n
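# Example of the odict produced by export_data for a hypothetical
# connection from block "analog_sig_source_x_0" port "0" to block
# "blocks_null_sink_0" port "0":
#
#     source_block_id: analog_sig_source_x_0
#     sink_block_id:   blocks_null_sink_0
#     source_key:      '0'
#     sink_key:        '0'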
| gpl-3.0 | -4,889,012,449,644,270,000 | 37.072464 | 91 | 0.579178 | false |
Ichag/odoo | openerp/addons/base/__init__.py | 379 | 1134 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,669,418,776,719,179,800 | 36.8 | 79 | 0.621693 | false |
IndigoTiger/ezzybot | ezzybot/util/bucket.py | 5 | 1252 | from time import time
class TokenBucket(object):
"""An implementation of the token bucket algorithm.
>>> bucket = TokenBucket(80, 0.5)
>>> print bucket.consume(10)
True
>>> print bucket.consume(90)
False
"""
def __init__(self, tokens, fill_rate):
"""tokens is the total tokens in the bucket. fill_rate is the
rate in tokens/second that the bucket will be refilled."""
self.capacity = float(tokens)
self._tokens = float(tokens)
self.fill_rate = float(fill_rate)
self.timestamp = time()
def consume(self, tokens):
"""Consume tokens from the bucket. Returns True if there were
sufficient tokens otherwise False."""
if tokens <= self.tokens:
self._tokens -= tokens
else:
return False
return True
def refill(self):
"""Refills the token bucket"""
self._tokens = self.capacity
def get_tokens(self):
now = time()
if self._tokens < self.capacity:
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
tokens = property(get_tokens)
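# Usage sketch: throttle a sender to roughly 5 messages/second with a
# bucket of 10 tokens refilled at 5 tokens/sec (illustrative values).
if __name__ == "__main__":
    from time import sleep
    bucket = TokenBucket(10, 5)
    for n in range(20):
        while not bucket.consume(1):
            sleep(0.05)  # wait for the bucket to refill
        print("sent message %d" % n)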
| gpl-3.0 | -6,717,562,289,323,511,000 | 28.809524 | 69 | 0.591853 | false |
sauloal/linuxscripts | apache/var/www/html/saulo/torrent/html/bin/clients/mainline/BTL/yielddefer25.py | 11 | 6035 | # yielddefer is an async programming mechanism with a blocking look-alike syntax
#
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# launch_coroutine maintains the illusion that the passed function
# (a generator) runs from beginning to end yielding when necessary
# for some job to complete and then continuing where it left off.
#
# def f():
# ...
# yield some_thing_that_takes_time()
# ...
# result = yield something_else()
# ...
#
# from inside a generator launched with launch_coroutine:
# wait on a deferred to be called back by yielding it
# return None by simply returning
# return an exception by throwing one
# return a value by yielding a non-Deferred
#
# by Greg Hazel
from __future__ import generators
import sys
import types
import traceback
from BTL.defer import Deferred, Failure
from BTL.stackthreading import _print_traceback
from twisted.python import failure
debug = False
name_debug = False
class GenWithDeferred(object):
if debug:
        __slots__ = ['gen', 'current_deferred', 'deferred', 'queue_task', 'stack']
else:
__slots__ = ['gen', 'current_deferred', 'deferred', 'queue_task']
def __init__(self, gen, deferred, queue_task):
self.gen = gen
self.deferred = deferred
self.queue_task = queue_task
self.current_deferred = None
if debug:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
self.stack = traceback.extract_stack(f)
# cut out GenWithDeferred() and launch_coroutine
self.stack = self.stack[:-2]
def cleanup(self):
del self.gen
del self.deferred
del self.queue_task
del self.current_deferred
if debug:
del self.stack
if name_debug:
def __getattr__(self, attr):
if '_recall' not in attr:
raise AttributeError(attr)
return self._recall
def _queue_task_chain(self, v):
recall = getattr(self, "_recall_%s" % self.gen.gi_frame.f_code.co_name)
self.queue_task(recall)
return v
else:
def _queue_task_chain(self, v):
self.queue_task(self._recall)
return v
def next(self):
if not self.current_deferred:
return self.gen.next()
if isinstance(self.current_deferred.result, failure.Failure):
r = self.current_deferred.result
            self.current_deferred.addErrback(lambda _: None)
return self.gen.throw(*r.exc_info())
return self.gen.send(self.current_deferred.result)
def _recall(self):
try:
df = self.next()
except StopIteration:
self.deferred.callback(None)
self.cleanup()
except Exception, e:
exc_type, value, tb = sys.exc_info()
## Magic Traceback Hacking
if debug:
# interpreter shutdown
if not sys:
return
# HERE. This should really be logged or else bittorrent-
# curses will never be able to properly output. --Dave
_print_traceback(sys.stderr, self.stack,
"generator %s" % self.gen.gi_frame.f_code.co_name, 0,
exc_type, value, tb)
else:
#if (tb.tb_lineno != self.gen.gi_frame.f_lineno or
# tb.f_code.co_filename != self.gen.gi_frame.f_code.co_filename):
# tb = FakeTb(self.gen.gi_frame, tb)
pass
## Magic Traceback Hacking
self.deferred.errback(Failure(value, exc_type, tb))
del tb
self.cleanup()
else:
if not isinstance(df, Deferred):
self.deferred.callback(df)
self.cleanup()
return
self.current_deferred = df
df.addCallback(self._queue_task_chain)
df.addErrback(self._queue_task_chain)
del df
class FakeTb(object):
__slots__ = ['tb_frame', 'tb_lineno', 'tb_orig', 'tb_next']
def __init__(self, frame, tb):
self.tb_frame = frame
self.tb_lineno = frame.f_lineno
self.tb_orig = tb
self.tb_next = tb.tb_next
def _launch_generator(queue_task, g, main_df):
gwd = GenWithDeferred(g, main_df, queue_task)
## the first one is fired for you
##gwd._recall()
# the first one is not fired for you, because if it errors the sys.exc_info
# causes an unresolvable circular reference that makes the gwd.deferred never
# be deleted.
gwd._queue_task_chain(None)
def launch_coroutine(queue_task, f, *args, **kwargs):
main_df = Deferred()
try:
g = f(*args, **kwargs)
except Exception, e:
if debug:
traceback.print_exc()
main_df.errback(Failure())
else:
if isinstance(g, types.GeneratorType):
_launch_generator(queue_task, g, main_df)
else:
# we got a non-generator, just callback with the return value
main_df.callback(g)
return main_df
# decorator
def coroutine(func, queue_task):
def replacement(*a, **kw):
return launch_coroutine(queue_task, func, *a, **kw)
return replacement
def wrap_task(add_task):
return lambda _f, *args, **kwargs : add_task(0, _f, *args, **kwargs)
_wrap_task = wrap_task
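# Usage sketch (assuming a twisted-style reactor; download() is a
# hypothetical function returning a Deferred):
#
#     queue_task = wrap_task(reactor.callLater)
#
#     def fetch_and_measure(url):
#         raw = yield download(url)   # waits for the Deferred to fire
#         yield len(raw)              # yielding a non-Deferred returns it
#
#     df = launch_coroutine(queue_task, fetch_and_measure, url)
#     df.addCallback(handle_result)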
| mit | -5,791,329,552,378,903,000 | 32.342541 | 86 | 0.594366 | false |
wsdream/CARP | CARP/src/evaluator.py | 1 | 5610 | ########################################################
# evaluator.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2014/11/03
########################################################
import numpy as np
from numpy import linalg as LA
import time, sys
import random
import core
from utilities import *
########################################################
# Function to run the prediction approach at each density
#
def execute(tensor, density, para):
    startTime = time.clock()
[numUser, numService, numTime] = tensor.shape
rounds = para['rounds']
logger.info('Data size: %d users * %d services * %d timeslices'\
%(numUser, numService, numTime))
logger.info('Run the algorithm for %d rounds: density = %.2f.'%(rounds, density))
evalResults = np.zeros((rounds, len(para['metrics'])))
timeResults = np.zeros((rounds, 1))
for k in range(rounds):
logger.info('----------------------------------------------')
logger.info('%d-round starts.'%(k + 1))
logger.info('----------------------------------------------')
# remove the entries of data to generate trainTensor and testTensor
(trainTensor, testTensor) = removeTensor(tensor, density, k, para)
logger.info('Removing data entries done.')
# invocation to the prediction function
iterStartTime = time.clock() # to record the running time for one round
predictedTensor = core.predict(trainTensor, para)
timeResults[k] = time.clock() - iterStartTime
# calculate the prediction error
result = np.zeros((numTime, len(para['metrics'])))
for i in range(numTime):
testMatrix = testTensor[:, :, i]
predictedMatrix = predictedTensor[:, :, i]
(testVecX, testVecY) = np.where(testMatrix)
testVec = testMatrix[testVecX, testVecY]
predVec = predictedMatrix[testVecX, testVecY]
result[i, :] = errMetric(testVec, predVec, para['metrics'])
evalResults[k, :] = np.average(result, axis=0)
logger.info('%d-round done. Running time: %.2f sec'%(k + 1, timeResults[k]))
logger.info('----------------------------------------------')
outFile = '%savg_%sResult_%.2f.txt'%(para['outPath'], para['dataType'], density)
saveResult(outFile, evalResults, timeResults, para)
logger.info('Density = %.2f done. Running time: %.2f sec'
%(density, time.clock() - startTime))
logger.info('==============================================')
########################################################
########################################################
# Function to remove the entries of data tensor
# Return the trainTensor and the corresponding testTensor
#
def removeTensor(tensor, density, round, para):
numTime = tensor.shape[2]
trainTensor = np.zeros(tensor.shape)
testTensor = np.zeros(tensor.shape)
for i in range(numTime):
seedID = round + i * 100
(trainMatrix, testMatrix) = removeEntries(tensor[:, :, i], density, seedID)
trainTensor[:, :, i] = trainMatrix
testTensor[:, :, i] = testMatrix
return trainTensor, testTensor
########################################################
########################################################
# Function to remove the entries of data matrix
# Use guassian random sampling
# Return trainMatrix and testMatrix
#
def removeEntries(matrix, density, seedID):
numAll = matrix.size
numTrain = int(numAll * density)
(vecX, vecY) = np.where(matrix > -1000)
np.random.seed(seedID % 100)
randPermut = np.random.permutation(numAll)
np.random.seed(seedID)
randSequence = np.random.normal(0, numAll / 6.0, numAll * 50)
trainSet = []
flags = np.zeros(numAll)
for i in xrange(randSequence.shape[0]):
sample = int(abs(randSequence[i]))
if sample < numAll:
idx = randPermut[sample]
if flags[idx] == 0 and matrix[vecX[idx], vecY[idx]] > 0:
trainSet.append(idx)
flags[idx] = 1
if len(trainSet) == numTrain:
break
if len(trainSet) < numTrain:
logger.critical('Exit unexpectedly: not enough data for density = %.2f.', density)
sys.exit()
trainMatrix = np.zeros(matrix.shape)
trainMatrix[vecX[trainSet], vecY[trainSet]] = matrix[vecX[trainSet], vecY[trainSet]]
testMatrix = np.zeros(matrix.shape)
testMatrix[matrix > 0] = matrix[matrix > 0]
testMatrix[vecX[trainSet], vecY[trainSet]] = 0
# ignore invalid testing users or services
idxX = (np.sum(trainMatrix, axis=1) == 0)
testMatrix[idxX, :] = 0
idxY = (np.sum(trainMatrix, axis=0) == 0)
testMatrix[:, idxY] = 0
return trainMatrix, testMatrix
########################################################
########################################################
# Function to compute the evaluation metrics
#
def errMetric(realVec, predVec, metrics):
result = []
absError = np.abs(predVec - realVec)
    mae = np.sum(absError) / absError.shape[0]
for metric in metrics:
if 'MAE' == metric:
result = np.append(result, mae)
if 'NMAE' == metric:
            nmae = mae / (np.sum(realVec) / absError.shape[0])
result = np.append(result, nmae)
if 'RMSE' == metric:
            rmse = LA.norm(absError) / np.sqrt(absError.shape[0])
result = np.append(result, rmse)
if 'MRE' == metric or 'NPRE' == metric:
relativeError = absError / realVec
relativeError = np.sort(relativeError)
if 'MRE' == metric:
mre = np.median(relativeError)
result = np.append(result, mre)
if 'NPRE' == metric:
                npre = relativeError[int(np.floor(0.9 * relativeError.shape[0]))]
result = np.append(result, npre)
return result
########################################################
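# Worked example for errMetric (hypothetical values): with
# realVec = [2, 4] and predVec = [3, 6], absError = [1, 2], so
# MAE = (1 + 2) / 2 = 1.5, NMAE = 1.5 / ((2 + 4) / 2) = 0.5, and
# RMSE = sqrt((1 + 4) / 2) ~= 1.58.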
| mit | -2,825,900,155,382,062,000 | 35.193548 | 86 | 0.58164 | false |
bhargav2408/python-for-android | python-modules/twisted/twisted/conch/ssh/service.py | 61 | 1418 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The parent class for all the SSH services. Currently implemented services
are ssh-userauth and ssh-connection.
Maintainer: Paul Swartz
"""
from twisted.python import log
class SSHService(log.Logger):
name = None # this is the ssh name for the service
protocolMessages = {} # these map #'s -> protocol names
transport = None # gets set later
def serviceStarted(self):
"""
called when the service is active on the transport.
"""
def serviceStopped(self):
"""
called when the service is stopped, either by the connection ending
or by another service being started
"""
def logPrefix(self):
return "SSHService %s on %s" % (self.name,
self.transport.transport.logPrefix())
def packetReceived(self, messageNum, packet):
"""
called when we receive a packet on the transport
"""
#print self.protocolMessages
if messageNum in self.protocolMessages:
messageType = self.protocolMessages[messageNum]
f = getattr(self,'ssh_%s' % messageType[4:],
None)
if f is not None:
return f(packet)
log.msg("couldn't handle %r" % messageNum)
log.msg(repr(packet))
self.transport.sendUnimplemented()
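# Minimal subclass sketch (hypothetical service; message number 60 and the
# packet layout are illustrative). packetReceived() dispatches to 'ssh_'
# plus the message name with its leading 'MSG_' stripped:
#
#     class SSHEcho(SSHService):
#         name = 'ssh-echo'
#         protocolMessages = {60: 'MSG_ECHO'}
#
#         def ssh_ECHO(self, packet):
#             self.transport.sendPacket(60, packet)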
| apache-2.0 | -8,899,945,667,857,492,000 | 28.541667 | 75 | 0.620592 | false |
manojgudi/sandhi | modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_kludged_imports.py | 18 | 1325 | #!/usr/bin/env python
#
# Copyright 2005,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_kludged_imports (gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_blks_import(self):
# make sure that this somewhat magic import works
from gnuradio import blks2
def test_gru_import(self):
# make sure that this somewhat magic import works
from gnuradio import gru
if __name__ == '__main__':
gr_unittest.run(test_kludged_imports, "test_kludged_imports.xml")
| gpl-3.0 | 703,080,767,544,896,900 | 29.813953 | 70 | 0.714717 | false |
jarathomas/openVA-Pipeline | pipeline.py | 1 | 49777 | #-------------------------------------------------------------------------------------------------------------------------------------------#
# openVA Pipeline: pipeline.py -- Software for processing Verbal Autopsy data with automated cause of death assignment. #
# Copyright (C) 2018 Jason Thomas, Samuel Clark, Martin Bratschi in collaboration with the Bloomberg Data for Health Initiative #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#-------------------------------------------------------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------------------------------------------------------#
# User Settings
sqlitePW = "enilepiP"
dbName = "Pipeline.db"
#-------------------------------------------------------------------------------------------------------------------------------------------#
from pysqlcipher3 import dbapi2 as sqlcipher
from pandas import read_csv, groupby
import pandas as pd
import sys
import csv
import datetime
import os
import subprocess
import shutil
import requests
import json
import sqlite3
import time
import re
import pickle
#-------------------------------------------------------------------------------------------------------------------------------------------#
# Define functions and objects needed for functioning of pipeline; then set up log files and configuration of pipeline
#-------------------------------------------------------------------------------------------------------------------------------------------#
class Dhis(object):
"""Access DHIS2 API."""
def __init__(self, dhisURL, dhisUser, dhisPass):
if '/api' in dhisURL:
print('Please do not specify /api/ in the server argument: e.g. --server=play.dhis2.org/demo')
sys.exit()
if dhisURL.startswith('localhost') or dhisURL.startswith('127.0.0.1'):
dhisURL = 'http://{}'.format(dhisURL)
elif dhisURL.startswith('http://'):
dhisURL = dhisURL
elif not dhisURL.startswith('https://'):
dhisURL = 'https://{}'.format(dhisURL)
self.auth = (dhisUser, dhisPass)
self.url = '{}/api/25'.format(dhisURL)
def get(self, endpoint, params=None):
"""
GET method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
if not params:
params = {}
params['paging'] = False
try:
r = requests.get(url=url, params=params, auth=self.auth)
if r.status_code != 200:
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post(self, endpoint, data):
"""
POST method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
try:
r = requests.post(url=url, json=data, auth=self.auth)
if r.status_code not in range(200, 206):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post_blob(self, f):
""" Post file to DHIS2 and return created UID for that file
:rtype: str
"""
url = '{}/fileResources'.format(self.url)
files = {'file': (f, open(f, 'rb'), 'application/x-sqlite3', {'Expires': '0'})}
try:
r = requests.post(url, files=files, auth=self.auth)
if r.status_code not in (200, 202):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
response = r.json()
file_id = response['response']['fileResource']['id']
return file_id
except requests.RequestException:
raise requests.RequestException
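# Usage sketch (hypothetical server and credentials; 'organisationUnits'
# is a standard DHIS2 endpoint):
#
#     api = Dhis('play.dhis2.org/demo', 'admin', 'district')
#     orgUnits = api.get('organisationUnits', params={'fields': 'id,name'})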
class VerbalAutopsyEvent(object):
""" DHIS2 event + a BLOB file resource"""
def __init__(self, va_id, program, dhis_orgunit, event_date, sex, dob, age, cod_code, algorithm_metadata, file_id):
self.va_id = va_id
self.program = program
self.dhis_orgunit = dhis_orgunit
self.event_date = event_date
self.sex = sex
self.dob = dob
self.age = age
self.cod_code = cod_code
self.algorithm_metadata = algorithm_metadata
self.datavalues = [
{"dataElement": "htm6PixLJNy", "value": self.va_id},
{"dataElement": "hi7qRC4SMMk", "value": self.sex},
{"dataElement": "mwSaVq64k7j", "value": self.dob},
{"dataElement": "F4XGdOBvWww", "value": self.cod_code},
{"dataElement": "wiJviUqN1io", "value": self.algorithm_metadata},
{"dataElement": "oPAg4MA0880", "value": self.age},
{"dataElement": "XLHIBoLtjGt", "value": file_id}
]
def format_to_dhis2(self, dhisUser):
"""
Format object to DHIS2 compatible event for DHIS2 API
:rtype: dict
"""
event = {
"program": self.program,
"orgUnit": self.dhis_orgunit,
"eventDate": datetime.datetime.strftime(self.event_date, '%Y-%m-%d'),
"status": "COMPLETED",
"storedBy": dhisUser,
"dataValues": self.datavalues
}
return event
def __str__(self):
return json.dumps(self, default=lambda o: o.__dict__)
def create_db(fName, evaList):
"""
Create a SQLite database with VA data + COD
:rtype: None
"""
conn = sqlite3.connect(fName)
with conn:
cur = conn.cursor()
cur.execute("CREATE TABLE vaRecord(ID INT, Attrtibute TEXT, Value TEXT)")
cur.executemany("INSERT INTO vaRecord VALUES (?,?,?)", evaList)
def getCODCode(myDict, searchFor):
for i in range(len(myDict.keys())):
match = re.search(searchFor, list(myDict.keys())[i])
if match:
return list(myDict.values())[i]
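# Example (hypothetical mapping): getCODCode({'Malaria': 'B54'}, 'Malaria')
# returns 'B54'; searchFor is applied as a regex over the dict keys.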
# set the ODK_Conf table item odkLastRunResult as 0, log error message, and exit script
def cleanup(errorMsg):
# handle single case when DB file not found
if connectionError == "1":
with open(connectionErrorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow([timeFMT, "Unable to Connect to SQLite Database, see {} for details".format(errorFile)])
sys.exit(1)
else:
# update ODK_Conf table with LastRunResult = 0
try:
sql = "UPDATE ODK_Conf SET odkLastRunResult = ?"
par = ("0",)
cursor.execute(sql, par)
db.commit()
if os.path.isfile(connectionErrorFile) == True:
try:
os.remove(connectionErrorFile)
except OSError:
print("Could not remove {}".format(connectionErrorFile))
# write errorMsg to errorFile if DB is inaccessible
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError):
db.rollback()
errorMsg[2] += "; unable to set odkLastRunResult to 0 (in ODK_Conf table)"
try:
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow(errorMsg)
except OSError:
print(errorMsg)
# close DB resources and exit script
finally:
cursor.close()
db.close()
sys.exit(1)
def findKeyValue(key, d):
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], list):
for i in d[k]:
for j in findKeyValue(key, i):
yield j
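# Example: list(findKeyValue('id', {'id': 1, 'rows': [{'id': 2}]}))
# evaluates to [1, 2]; the generator recurses into lists of dicts.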
# error log files
errorFile = "./dbErrorLog.csv"
timeFMT = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
connectionError = "0"
connectionErrorFile = "./sqlConnect.csv"
## create error file if it does not exist
if os.path.isfile(errorFile) == False:
try:
with open(errorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["Date"] + ["Description"] + ["Additional Information"])
except (OSError) as e:
print(str(e))
sys.exit(1)
# connect to the database and configure the pipeline's settings for ODK Aggregate, openVA, and DHIS2.
if os.path.isfile(dbName) == False:
connectionError = "1"
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow([timeFMT, "Database {}.db not found".format(dbName), ])
cleanup()
db = sqlcipher.connect(dbName)
db.execute("PRAGMA key = " + sqlitePW)
sqlODK = "SELECT odkID, odkURL, odkUser, odkPass, odkFormID, odkLastRun, odkLastRunResult FROM ODK_Conf"
sqlPipeline = "SELECT workingDirectory, openVA_Algorithm, algorithmMetadataCode, codSource FROM Pipeline_Conf"
sqlInterVA4 = "SELECT HIV, Malaria FROM InterVA4_Conf"
sqlAdvancedInterVA4 = "SELECT directory, filename, output, append, groupcode, replicate, replicate_bug1, replicate_bug2, write FROM Advanced_InterVA4_Conf"
sqlInSilicoVA = "SELECT Nsim FROM InSilicoVA_Conf"
sqlAdvancedInSilicoVA = "SELECT isNumeric, updateCondProb, keepProbbase_level, CondProb, CondProbNum, datacheck, datacheck_missing," \
+ "warning_write, external_sep, thin, burnin, auto_length, conv_csmf, jump_scale," \
+ "levels_prior, levels_strength, trunc_min, trunc_max, subpop, java_option, seed," \
+ "phy_code, phy_cat, phy_unknown, phy_external, phy_debias, exclude_impossible_cause, indiv_CI " \
+ "FROM Advanced_InSilicoVA_Conf"
sqlDHIS = "SELECT dhisURL, dhisUser, dhisPass, dhisOrgUnit FROM DHIS_Conf"
sqlCODCodes_WHO = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'WHO'"
sqlCODCodes_Tariff = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'Tariff'"
## grab configuration settings from SQLite DB
try:
# ODK configuration
cursor = db.cursor()
cursor.execute(sqlODK)
odkQuery = cursor.fetchall()
for row in odkQuery:
odkID = row[0]
odkURL = row[1]
odkUser = row[2]
odkPass = row[3]
odkFormID = row[4]
odkLastRun = row[5]
odkLastRunDate = datetime.datetime.strptime(odkLastRun, "%Y-%m-%d_%H:%M:%S").strftime("%Y/%m/%d")
odkLastRunDatePrev = (datetime.datetime.strptime(odkLastRunDate, "%Y/%m/%d") - datetime.timedelta(days=1)).strftime("%Y/%m/%d")
odkLastRunResult = row[6]
# Pipeline configuration
cursor.execute(sqlPipeline)
pipelineQuery = cursor.fetchall()
for row in pipelineQuery:
processDir = row[0]
pipelineAlgorithm = row[1]
algorithmMetadataCode = row[2]
codSource = row[3]
# InterVA4 configuration
cursor.execute(sqlInterVA4)
interVA4Query = cursor.fetchall()
for row in interVA4Query:
interVA_HIV = row[0]
interVA_Malaria = row[1]
# InterVA4 advanced configuration
cursor.execute(sqlAdvancedInterVA4)
advancedInterVA4Query = cursor.fetchall()
for row in advancedInterVA4Query:
interVA_directory = row[0]
interVA_filename = row[1]
interVA_output = row[2]
interVA_append = row[3]
interVA_groupcode = row[4]
interVA_replicate = row[5]
interVA_replicate_bug1 = row[6]
interVA_replicate_bug2 = row[7]
interVA_write = row[8]
# InSilicoVA configuration
cursor.execute(sqlInSilicoVA)
insilicoVAQuery = cursor.fetchall()
for row in insilicoVAQuery:
insilico_Nsim = row[0]
# InSilicoVA advanced configuration
cursor.execute(sqlAdvancedInSilicoVA)
advancedInsilicoVAQuery = cursor.fetchall()
for row in advancedInsilicoVAQuery:
insilico_isNumeric = row[ 0]
insilico_updateCondProb = row[ 1]
insilico_keepProbbase_level = row[ 2]
insilico_CondProb = row[ 3]
insilico_CondProbNum = row[ 4]
insilico_datacheck = row[ 5]
insilico_datacheck_missing = row[ 6]
insilico_warning_write = row[ 7]
insilico_external_sep = row[ 8]
insilico_thin = row[ 9]
insilico_burnin = row[10]
insilico_auto_length = row[11]
insilico_conv_csmf = row[12]
insilico_jump_scale = row[13]
insilico_levels_prior = row[14]
insilico_levels_strength = row[15]
insilico_trunc_min = row[16]
insilico_trunc_max = row[17]
insilico_subpop = row[18]
insilico_java_option = row[19]
insilico_seed = row[20]
insilico_phy_code = row[21]
insilico_phy_cat = row[22]
insilico_phy_unknown = row[23]
insilico_phy_external = row[24]
insilico_phy_debias = row[25]
insilico_exclude_impossible_cause = row[26]
insilico_indiv_CI = row[27]
# DHIS2 configuration
cursor.execute(sqlDHIS)
dhisQuery = cursor.fetchall()
for row in dhisQuery:
dhisURL = row[0]
dhisUser = row[1]
dhisPass = row[2]
dhisOrgUnit = row[3]
# CoD Codes for DHIS2
cursor.execute(sqlCODCodes_WHO)
resultsWHO = cursor.fetchall()
codesWHO = dict(resultsWHO)
cursor.execute(sqlCODCodes_Tariff)
resultsTariff = cursor.fetchall()
codesTariff = dict(resultsTariff)
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Problem selecting config information from ODK_Conf ", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Problem selecting config information from ODK_Conf"]
cleanup(errorMsg)
#-------------------------------------------------------------------------------------------------------------------------------------------#
# create folders & files to store (ODK & openVA) input and output; also create call to ODK Briefcase
#-------------------------------------------------------------------------------------------------------------------------------------------#
odkBCExportDir = processDir + "/ODKExport"
odkBCExportFilename = "ODKExportNew.csv"
odkBCExportPrevious = odkBCExportDir + "/ODKExportPrevious.csv"
odkBCExportNewFile = odkBCExportDir + "/" + odkBCExportFilename
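# ODK Briefcase command line: pull submissions for the configured form from
# ODK Aggregate and export them to CSV, starting from the day before the
# last successful run (odkLastRunDatePrev) so no records are missed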
odkBCArgumentList = "java -jar ODK-Briefcase-v1.10.1.jar -oc -em -id '" + odkFormID + "' -sd '" + odkBCExportDir + "' -ed '" \
+ odkBCExportDir + "' -f '" + odkBCExportFilename + "' -url '" + odkURL + "' -u '" + odkUser \
+ "' -p '" + odkPass + "' -start '" + odkLastRunDatePrev + "'"
openVAFilesDir = processDir + "/OpenVAFiles"
openVAReadyFile = odkBCExportDir + "/OpenVAReadyFile.csv"
rScriptIn = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".R"
rScriptOut = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".Rout"
dhisDir = processDir + "/DHIS2"
if codSource=="WHO":
dhisCODCodes = codesWHO
else:
dhisCODCodes = codesTariff
# check if processing directory exists and create if necessary
if not os.path.exists(processDir):
try:
os.makedirs(processDir)
except OSError as e:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create processing directory: " + processDir, str(e), timeFMT)
try:
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create processing directory: " + processDir]
cleanup(errorMsg)
# create openVAFilesDir (if does not exist)
if not os.path.exists(openVAFilesDir + "/" + timeFMT):
try:
os.makedirs(openVAFilesDir + "/" + timeFMT)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create openVA Directory: " + openVAFilesDir + "/" + timeFMT, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create openVA directory: " + openVAFilesDir + "/" + timeFMT]
cleanup(errorMsg)
# make a copy of current ODK Briefcase Export file, to compare with new file once exported (if there is an existing export file)
if os.path.isfile(odkBCExportNewFile) == True and odkLastRunResult == 1 and not os.path.isfile(connectionErrorFile):
try:
shutil.copy(odkBCExportNewFile, odkBCExportPrevious)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error: Trying to copy export files from ODK Briefcase", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Trying to copy export files from ODK Briefcase"]
cleanup(errorMsg)
try:
os.remove(openVAReadyFile)
except (OSError) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime)"
par = ("Could not remove " + openVAReadyFile, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not remove " + openVAReadyFile]
cleanup(errorMsg)
# launch ODK Briefcase to collect ODK Aggregate data and export to file for further processing
try:
process = subprocess.Popen(odkBCArgumentList, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
rc = process.returncode
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not launch ODK Briefcase Java Application", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not launch ODK Briefcase Java Application",""]
cleanup(errorMsg)
# catch application errors from ODK Briefcase and log into EventLog table
if rc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (str(stderr), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
if "SEVERE" in str(stderr):
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
        par = (str(stderr), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Briefcase Export Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "ODK Briefcase ran successfully but problems writing to DB (check odkLastRunResult in ODK_Conf)"]
cleanup(errorMsg)
# check if previous file exists from above operations and create delta file of new entries
if os.path.isfile(odkBCExportPrevious) == True:
try:
        ## WARNING: odkBCExportPrevious & odkBCExportNewFile (CSV files)
## contain sensitive VA information (leaving them in folder)
with open(odkBCExportPrevious, "r", newline="") as t1, open(odkBCExportNewFile, "r", newline="") as t2:
fileone = t1.readlines()
filetwo = t2.readlines()
header = filetwo[0]
with open(openVAReadyFile, "w", newline="") as outFile:
outFile.write(header)
for line in filetwo:
if line not in fileone:
outFile.write(line)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES"
par = ("Could not create: " + openVAReadyFile, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create: " + openVAReadyFile]
cleanup(errorMsg)
else:
# if there is no pre-existing ODK Briefcase Export file, then copy and rename to OpenVAReadyFile.csv
try:
shutil.copy(odkBCExportNewFile, openVAReadyFile)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (e, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not copy: " + odkBCExportNewFile + " to: " + openVAReadyFile]
cleanup(errorMsg)
# if no records retrieved, then close up shop; otherwise, create R script for running openVA
## WARNING: openVAReadyFile (CSV file) contains sensitive VA information (leaving it in folder)
with open(openVAReadyFile, "r", newline="") as outFile:
nRecords = len(list(outFile)) - 1 ## take away 1 for the column header
if nRecords == 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("No Records From ODK Briefcase (nothing more to do)", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "No records from ODK Briefcase, but error writing to DB"]
cleanup(errorMsg)
try:
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"No records from ODK Briefcase, but error writing to DB (trying to set odkLastRun & odkLastRunResult)."]
cleanup(errorMsg)
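# build an R script that (1) maps the ODK export to the input format of the
# configured algorithm (InSilicoVA or InterVA), (2) runs the algorithm, and
# (3) writes out the entity-attribute-value blob and record-storage CSVs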
try:
with open(rScriptIn, "w", newline="") as f:
f.write("date() \n")
f.write("library(openVA); library(CrossVA) \n")
f.write("getwd() \n")
f.write("records <- read.csv('" + openVAReadyFile + "') \n")
# InSilicoVA
if pipelineAlgorithm == "InSilicoVA":
f.write("names(data) <- tolower(data) \n")
f.write("data <- map_records_insilicova(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
            if odkID is None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
f.write("results <- insilico(data=data, " + ", \n")
f.write("\t isNumeric=" + insilico_isNumeric + ", \n")
f.write("\t updateCondProb=" + insilico_updateCondProb + ", \n")
f.write("\t keepProbbase.level=" + insilico_keepProbbase_level + ", \n")
f.write("\t CondProb=" + insilico_CondProb + ", \n")
f.write("\t CondProbNum=" + insilico_CondProbNum + ", \n")
f.write("\t datacheck=" + insilico_datacheck + ", \n")
f.write("\t datacheck.missing=" + insilico_datacheck_missing + ", \n")
f.write("\t warning.write=" + insilico_warning_write + ", \n")
f.write("\t external.sep=" + insilico_external_sep + ", \n")
f.write("\t Nsim=" + insilico_Nsim + ", \n")
f.write("\t thin=" + insilico_thin + ", \n")
f.write("\t burnin=" + insilico_burnin + ", \n")
f.write("\t auto.length=" + insilico_auto_length + ", \n")
f.write("\t conv.csmf=" + insilico_conv_csmf + ", \n")
f.write("\t jump.scale=" + insilico_jump_scale + ", \n")
f.write("\t levels.prior=" + insilico_levels_prior + ", \n")
f.write("\t levels.strength=" + insilico_levels_strength + ", \n")
f.write("\t trunc.min=" + insilico_trunc_min + ", \n")
f.write("\t trunc.max=" + insilico_trunc_max + ", \n")
f.write("\t subpop=" + insilico_subpop + ", \n")
f.write("\t java.option=" + insilico_java_option + ", \n")
f.write("\t seed=" + insilico_seed + ", \n")
f.write("\t phy.code=" + insilico_phy_code + ", \n")
f.write("\t phy.cat=" + insilico_phy_cat + ", \n")
f.write("\t phy.unknown=" + insilico_phy_unknown + ", \n")
f.write("\t phy.external=" + insilico_phy_external + ", \n")
f.write("\t phy.debias=" + insilico_phy_debias + ", \n")
f.write("\t exclude.impossible.cause=" + insilico_exclude_impossible_cause + ", \n")
f.write("\t indiv.CI=" + insilico_indiv_CI + ") \n")
f.write("sex <- ifelse(tolower(data$male)=='y', 'Male', 'Female') \n")
# InterVA
if pipelineAlgorithm == "InterVA":
f.write("data <- map_records_interva4(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
            if odkID is None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
f.write("results <- InterVA(Input=data, \n")
f.write("\t HIV= '" + interVA_HIV + "', \n")
f.write("\t Malaria = '" + interVA_Malaria + "', \n")
f.write("\t output='" + interVA_output + "', \n")
f.write("\t groupcode=" + interVA_groupcode + ", \n")
f.write("\t replicate=" + interVA_replicate + ", \n")
f.write("\t replicate.bug1=" + interVA_replicate_bug1 + ", \n")
f.write("\t replicate.bug2=" + interVA_replicate_bug2 + ", \n")
f.write("\t write=FALSE) \n")
f.write("sex <- ifelse(tolower(data$MALE)=='y', 'Male', 'Female') \n")
# write results
f.write("cod <- getTopCOD(results) \n")
f.write("hasCOD <- as.character(data$ID) %in% as.character(levels(cod$ID)) \n")
f.write("dob <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10021), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("dod <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10023), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("age <- floor(records$consented.deceased_CRVS.info_on_deceased.ageInDays/365.25) \n")
f.write("## create matrices for DHIS2 blob (data2) and transfer database (data3) \n")
f.write("## first column must be ID \n")
f.write("metadataCode <- '" + algorithmMetadataCode + "'\n")
f.write("cod2 <- rep('MISSING', nrow(data)); cod2[hasCOD] <- as.character(cod[,2]) \n")
f.write("data2 <- cbind(data[,-1], cod2, metadataCode) \n")
f.write("names(data2) <- c(names(data[,-1]), 'Cause of Death', 'Metadata') \n")
f.write("evaBlob <- cbind(rep(as.character(data[,1]), each=ncol(data2)), rep(names(data2)), c(apply(data2, 1, c))) \n")
f.write("colnames(evaBlob) <- c('ID', 'Attribute', 'Value') \n")
f.write("write.csv(evaBlob, file='" + openVAFilesDir + "/entityAttributeValue.csv', row.names=FALSE, na='') \n\n")
f.write("data3 <- cbind(as.character(data[,1]), sex, dob, dod, age, cod2, metadataCode, data[,-1]) \n")
f.write("names(data3) <- c('id', 'sex', 'dob', 'dod', 'age', 'cod', 'metadataCode', names(data[,-1])) \n")
f.write("write.csv(data3, file='" + openVAFilesDir + "/recordStorage.csv', row.names=FALSE, na='') \n")
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create R Script File","Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create R Script File"]
cleanup(errorMsg)
# run R script
rBatch = "R CMD BATCH --vanilla " + rScriptIn + " " + rScriptOut
rprocess = subprocess.Popen(rBatch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = rprocess.communicate()
rrc = rprocess.returncode
if rrc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not run R Script", str(stderr), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not run R Script", str(stderr)]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("OpenVA Analysis Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "OpenVA Analysis Completed Successfully (error committing message to database)."]
cleanup(errorMsg)
# push results to DHIS2
try:
api = Dhis(dhisURL, dhisUser, dhisPass)
except (requests.RequestException) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to connect to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to connect to DHIS2"]
cleanup(errorMsg)
# verify VA program and orgUnit
try:
vaPrograms = api.get("programs", params={"filter": "name:like:Verbal Autopsy"}).get("programs")
orgUnitValid = len(api.get("organisationUnits", params={"filter": "id:eq:{}".format(dhisOrgUnit)})["organisationUnits"])==1
if not orgUnitValid:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Organisation Unit UID could not be found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Organisation Unit UID could not be found.", "Error committing message to database"]
cleanup(errorMsg)
    if not vaPrograms:
        try:
            sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
            par = ("'Verbal Autopsy' program not found", "Error", timeFMT)
            cursor.execute(sql, par)
            db.commit()
        except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
            db.rollback()
            errorMsg = [timeFMT, "Error: 'Verbal Autopsy' program not found.", "Error committing message to database"]
            cleanup(errorMsg)
        # without a program UID the pipeline cannot continue, so exit via
        # cleanup() instead of failing later when vaPrograms[0] is accessed
        cleanup([timeFMT, "Error: 'Verbal Autopsy' program not found.", ""])
elif len(vaPrograms) > 1:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("More than one 'Verbal Autopsy' found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: More than one 'Verbal Autopsy' found.", "Error committing message to database"]
cleanup(errorMsg)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID",
"Error committing message to database"]
cleanup(errorMsg)
vaProgramUID = vaPrograms[0]["id"]
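# each VA record is stored as a small SQLite file ("blob") that gets posted
# to DHIS2 and attached to the record's event via its file resource ID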
blobPath = os.path.join(dhisDir, "blobs")
try:
if not os.path.isdir(blobPath):
os.makedirs(blobPath)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create folder for DHIS blobs.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to create folder for DHIS blobs."]
cleanup(errorMsg)
events = []
export = {}
## read in VA data (with COD and algorithm metadata) from csv's (and create groups by ID for Entity-Attribute-Value file)
try:
## WARNING: The following CSV file contains sensitive VA information (leaving it in folder)!
dfDHIS2 = pd.read_csv(openVAFilesDir + "/entityAttributeValue.csv")
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv",
"Error committing message to database"]
cleanup(errorMsg)
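# group the entity-attribute-value rows by record ID so that each record's
# attributes can be written into its own blob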
grouped = dfDHIS2.groupby(["ID"])
## prepare events for DHIS2 export
try:
with open(openVAFilesDir + "/recordStorage.csv", "r", newline="") as csvIn:
with open(openVAFilesDir + "/newStorage.csv", "w", newline="") as csvOut:
reader = csv.reader(csvIn)
writer = csv.writer(csvOut, lineterminator="\n")
header = next(reader)
header.extend(["dhisVerbalAutopsyID", "pipelineOutcome"])
writer.writerow(header)
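            # rows with an assigned CoD become DHIS2 events; rows without one
            # are only written back to newStorage.csv with a note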
for row in reader:
if row[5]!="MISSING":
vaID = str(row[0])
blobFile = "{}.db".format(os.path.join(dhisDir, "blobs", vaID))
blobRecord = grouped.get_group(str(row[0]))
blobEVA = blobRecord.values.tolist()
## create DHIS2 blob
try:
create_db(blobFile, blobEVA)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create DHIS2 BLOB", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to create DHIS2 BLOB", "Error committing message to database"]
cleanup(errorMsg)
## post DHIS2 blob
try:
fileID = api.post_blob(blobFile)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post BLOB to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post BLOB to DHIS2"]
cleanup(errorMsg)
sex = row[1].lower()
dob = row[2]
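                    # a missing date of death is flagged with the placeholder date 9999-09-09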
if row[3] =="":
eventDate = datetime.date(9999,9,9)
else:
dod = datetime.datetime.strptime(row[3], "%Y-%m-%d")
eventDate = datetime.date(dod.year, dod.month, dod.day)
age = row[4]
if row[5] == "Undetermined":
codCode = "99"
else:
codCode = getCODCode(dhisCODCodes, row[5])
e = VerbalAutopsyEvent(vaID, vaProgramUID, dhisOrgUnit,
eventDate, sex, dob, age, codCode, algorithmMetadataCode, fileID)
events.append(e.format_to_dhis2(dhisUser))
row.extend([vaID, "Pushing to DHIS2"])
writer.writerow(row)
else:
row.extend(["", "No CoD Assigned"])
writer.writerow(row)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir,
"Error committing message to database"]
cleanup(errorMsg)
export["events"] = events
try:
log = api.post("events", data=export)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post events to DHIS2 VA Program.", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post events to DHIS2 VA Program."]
cleanup(errorMsg)
if 'importSummaries' not in log['response'].keys():
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Failed to retrieve summary from post to DHIS2 VA Program.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error", "Failed to retrieve summary from post to DHIS2 VA Program."]
cleanup(errorMsg)
if log["httpStatusCode"] == 200:
nPosted = len(log['response']['importSummaries'])
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successfully posted {} events to DHIS2 VA Program.".format(nPosted), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Successfully posted {} events to DHIS2 VA Program, but error writing to DB".format(nPosted)]
cleanup(errorMsg)
vaReferences = list(findKeyValue("reference", d=log["response"]))
dfNewStorage = pd.read_csv(openVAFilesDir + "/newStorage.csv")
try:
for vaReference in vaReferences:
postedDataValues = api.get("events/{}".format(vaReference)).get("dataValues")
postedVAIDIndex = next((index for (index, d) in enumerate(postedDataValues) if d["dataElement"]=="htm6PixLJNy"), None)
postedVAID = postedDataValues[postedVAIDIndex]["value"]
rowVAID = dfNewStorage["dhisVerbalAutopsyID"] == postedVAID
dfNewStorage.loc[rowVAID,"pipelineOutcome"] = "Pushed to DHIS2"
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error trying to verify events posted to DHIS2", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error trying to verify events posted to DHIS2", ""]
cleanup(errorMsg)
# store results in database
try:
for row in dfNewStorage.itertuples():
xferDBID = row[1]
xferDBOutcome = row[254]
vaData = row[1],row[8:253]
vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
xferDBRecord = pickle.dumps(vaDataFlat)
sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
cursor.execute(sqlXferDB, par)
db.commit()
        ## note: to read back in: (1) cursor.execute(SQL SELECT STATEMENT) (2) results = pickle.loads(sqlResult[0])
## An alternative version of storing VA records to SQLite DB(not relying on pickle)
# for row in dfNewStorage.itertuples():
# xferDBID = row[1]
# xferDBOutcome = row[254]
# with open("xferDBRecord.txt", "w", newline="") as f:
# vaData = row[1],row[8:253]
# vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
# writer = csv.writer(f, lineterminator="\n")
# writer.writerow(vaDataFlat)
# with open("xferDBRecord.txt", "rb") as f:
# xferDBRecord = f.read()
# sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
# par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
# cursor.execute(sqlXferDB, par)
# db.commit()
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error storing Blobs to {}.db".format(dbName), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error storing Blobs to {}.db".format(dbName), ""]
cleanup(errorMsg)
try:
nNewStorage = dfNewStorage.shape[0]
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Stored {} records to {}.db".format(nNewStorage, dbName), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"Stored {} records to {}.db, but error trying to log message to EventLog".format(nNewStorage, dbName)]
cleanup(errorMsg)
# all done!
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successful completion of Pipeline", "Information", str(datetime.datetime.now()))
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Finished executing Pipeline steps, but error trying to log last message."]
cleanup(errorMsg)
| gpl-3.0 | 4,634,560,813,896,929,000 | 49.279798 | 171 | 0.53587 | false |
gfyoung/pandas | pandas/tests/frame/indexing/test_getitem.py | 2 | 5364 | import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
MultiIndex,
Series,
Timestamp,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
        tm.assert_frame_equal(result, expected)
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
| bsd-3-clause | 7,923,465,243,973,377,000 | 29.477273 | 79 | 0.537472 | false |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/training/server_lib_same_variables_no_clear_test.py | 125 | 2159 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesNoClearTest(test.TestCase):
# Verifies behavior of multiple variables with multiple sessions connecting to
# the same server.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSameVariablesNoClear(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess_1:
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
sess_1.run([v0.initializer, v1.initializer])
self.assertAllEqual([[4]], sess_1.run(v2))
with session.Session(server.target) as sess_2:
new_v0 = ops.get_default_graph().get_tensor_by_name("v0:0")
new_v1 = ops.get_default_graph().get_tensor_by_name("v1:0")
new_v2 = math_ops.matmul(new_v0, new_v1)
self.assertAllEqual([[4]], sess_2.run(new_v2))
if __name__ == "__main__":
test.main()
| mit | -7,061,813,193,182,121,000 | 38.981481 | 80 | 0.698472 | false |
nilsgrabbert/spark | examples/src/main/python/streaming/hdfs_wordcount.py | 85 | 1865 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in new text files created in the given directory
Usage: hdfs_wordcount.py <directory>
<directory> is the directory that Spark Streaming will use to find and read new text files.
To run this on your local machine on directory `localdir`, run this example
$ bin/spark-submit examples/src/main/python/streaming/hdfs_wordcount.py localdir
Then create a text file in `localdir` and the words in the file will get counted.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: hdfs_wordcount.py <directory>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonStreamingHDFSWordCount")
ssc = StreamingContext(sc, 1)
lines = ssc.textFileStream(sys.argv[1])
counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda x: (x, 1))\
.reduceByKey(lambda a, b: a+b)
counts.pprint()
ssc.start()
ssc.awaitTermination()
| apache-2.0 | 3,783,801,989,740,377,000 | 36.3 | 94 | 0.720643 | false |
ddrmanxbxfr/servo | tests/wpt/css-tests/tools/py/testing/code/test_code.py | 216 | 4212 | import py
import sys
def test_ne():
code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
assert code1 == code1
code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file():
name = 'abc-123'
co_code = compile("pass\n", name, 'exec')
assert co_code.co_filename == name
code = py.code.Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_with_class():
class A:
pass
py.test.raises(TypeError, "py.code.Code(A)")
if True:
def x():
pass
def test_code_fullsource():
code = py.code.Code(x)
full = code.fullsource
assert 'test_code_fullsource()' in str(full)
def test_code_source():
code = py.code.Code(x)
src = code.source()
expected = """def x():
pass"""
assert str(src) == expected
def test_frame_getsourcelineno_myself():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
source, lineno = f.code.fullsource, f.lineno
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
prop = f.code.__class__.fullsource
try:
f.code.__class__.fullsource = None
assert f.statement == py.code.Source("")
finally:
f.code.__class__.fullsource = prop
def test_code_from_func():
co = py.code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_builtin_patch_unpatch(monkeypatch):
cpy_builtin = py.builtin.builtins
comp = cpy_builtin.compile
def mycompile(*args, **kwargs):
return comp(*args, **kwargs)
class Sub(AssertionError):
pass
monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
py.code.patch_builtins()
assert cpy_builtin.AssertionError != Sub
assert cpy_builtin.compile != mycompile
py.code.unpatch_builtins()
assert cpy_builtin.AssertionError is Sub
assert cpy_builtin.compile == mycompile
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = py.test.raises(Exception, f)
s = str(excinfo)
if sys.version_info[0] < 3:
u = unicode(excinfo)
def test_code_getargs():
def f1(x):
pass
c1 = py.code.Code(f1)
assert c1.getargs(var=True) == ('x',)
def f2(x, *y):
pass
c2 = py.code.Code(f2)
assert c2.getargs(var=True) == ('x', 'y')
def f3(x, **z):
pass
c3 = py.code.Code(f3)
assert c3.getargs(var=True) == ('x', 'z')
def f4(x, *y, **z):
pass
c4 = py.code.Code(f4)
assert c4.getargs(var=True) == ('x', 'y', 'z')
def test_frame_getargs():
def f1(x):
return sys._getframe(0)
fr1 = py.code.Frame(f1('a'))
assert fr1.getargs(var=True) == [('x', 'a')]
def f2(x, *y):
return sys._getframe(0)
fr2 = py.code.Frame(f2('a', 'b', 'c'))
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
def f3(x, **z):
return sys._getframe(0)
fr3 = py.code.Frame(f3('a', b='c'))
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
def f4(x, *y, **z):
return sys._getframe(0)
fr4 = py.code.Frame(f4('a', 'b', c='d'))
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
('z', {'c': 'd'})]
class TestExceptionInfo:
def test_bad_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
assert exci.getrepr()
class TestTracebackEntry:
def test_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
entry = exci.traceback[0]
source = entry.getsource()
assert len(source) == 4
assert 'else: assert False' in source[3]
| mpl-2.0 | 5,943,583,065,023,950,000 | 25.490566 | 76 | 0.57075 | false |
dralves/nixysa | third_party/ply-3.1/test/lex_hedit.py | 174 | 1141 | # -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
# nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'H_EDIT_DESCRIPTOR',
)
# Tokens
t_ignore = " \t\n"
def t_H_EDIT_DESCRIPTOR(t):
r"\d+H.*" # This grabs all of the remaining text
i = t.value.index('H')
    n = int(t.value[:i])
# Adjust the tokenizing position
t.lexer.lexpos -= len(t.value) - (i+1+n)
t.value = t.value[i+1:i+1+n]
return t
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
| apache-2.0 | -3,758,831,825,911,637,500 | 23.276596 | 79 | 0.516214 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ip_pfilter_oper.py | 1 | 21114 | """ Cisco_IOS_XR_ip_pfilter_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ip\-pfilter package operational data.
This module contains definitions
for the following management objects\:
pfilter\-ma\: Root class of PfilterMa Oper schema
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class PfilterMa(object):
"""
Root class of PfilterMa Oper schema
.. attribute:: nodes
Node\-specific operational data
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = PfilterMa.Nodes()
self.nodes.parent = self
class Nodes(object):
"""
Node\-specific operational data
.. attribute:: node
PfilterMa operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
PfilterMa operational data for a particular
node
.. attribute:: node_name <key>
The node
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: process
Operational data for pfilter
**type**\: :py:class:`Process <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.process = PfilterMa.Nodes.Node.Process()
self.process.parent = self
class Process(object):
"""
Operational data for pfilter
.. attribute:: ipv4
Operational data for pfilter
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4>`
.. attribute:: ipv6
Operational data for pfilter
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ipv4 = PfilterMa.Nodes.Node.Process.Ipv4()
self.ipv4.parent = self
self.ipv6 = PfilterMa.Nodes.Node.Process.Ipv6()
self.ipv6.parent = self
class Ipv6(object):
"""
Operational data for pfilter
.. attribute:: interfaces
Operational data for pfilter
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6.Interfaces>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interfaces = PfilterMa.Nodes.Node.Process.Ipv6.Interfaces()
self.interfaces.parent = self
class Interfaces(object):
"""
Operational data for pfilter
.. attribute:: interface
Operational data for pfilter
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6.Interfaces.Interface>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
Operational data for pfilter
.. attribute:: interface_name <key>
Name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: acl_information
Interface ACL Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.acl_information = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interface[Cisco-IOS-XR-ip-pfilter-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.acl_information is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6.Interfaces']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:ipv6'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6']['meta_info']
class Ipv4(object):
"""
Operational data for pfilter
.. attribute:: interfaces
Operational data for pfilter
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4.Interfaces>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interfaces = PfilterMa.Nodes.Node.Process.Ipv4.Interfaces()
self.interfaces.parent = self
class Interfaces(object):
"""
Operational data for pfilter
.. attribute:: interface
Operational data for pfilter
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4.Interfaces.Interface>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
Operational data for pfilter
.. attribute:: interface_name <key>
Name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: acl_information
Interface ACL Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.acl_information = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interface[Cisco-IOS-XR-ip-pfilter-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.acl_information is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4.Interfaces']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:process'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.ipv4 is not None and self.ipv4._has_data():
return True
if self.ipv6 is not None and self.ipv6._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma/Cisco-IOS-XR-ip-pfilter-oper:nodes/Cisco-IOS-XR-ip-pfilter-oper:node[Cisco-IOS-XR-ip-pfilter-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node_name is not None:
return True
if self.process is not None and self.process._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma/Cisco-IOS-XR-ip-pfilter-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa']['meta_info']
| apache-2.0 | -1,239,986,136,252,464,000 | 38.027726 | 341 | 0.426257 | false |
abligh/xen-4.2-live-migrate | tools/python/xen/xend/xenstore/tests/stress_xs.py | 49 | 2700 | # This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright (c) 2005 XenSource Ltd
import random
import sys
import threading
import time
import xen.lowlevel.xs
from xen.xend.xenstore import xsutil
from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
PATH = '/tool/stress_xs'
def stress():
xstransact.Remove(PATH)
xstransact.Mkdir(PATH)
xswatch(PATH, watch_callback)
def do(f):
        t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
do(stress_write)
do(stress_get_domain_path)
do(stress_get_domain_path_xsutil)
do(stress_open_close)
while True:
# Wait for Ctrl-C.
time.sleep(100000000)
def stress_write():
xstransact.Write(PATH, 'key', '1')
while True:
val = xstransact.Gather(PATH, ('key', int))
xstransact.Store(PATH, ('key', val + 1))
random_sleep()
def stress_get_domain_path():
xs_handle = xen.lowlevel.xs.xs()
domid = 0
while True:
xs_handle.get_domain_path(domid)
domid += 1
random_sleep()
def stress_get_domain_path_xsutil():
domid = 0
while True:
xsutil.GetDomainPath(domid)
domid += 1
random_sleep()
def stress_open_close():
while True:
xs_handle = xen.lowlevel.xs.xs()
try:
try:
trans = xs_handle.transaction_start()
val = int(xs_handle.read(trans, PATH + '/key'))
xs_handle.write(trans, PATH + '/key', str(val + 1))
xs_handle.transaction_end(trans, False)
except:
xs_handle.transaction_end(trans, True)
random_sleep()
finally:
del xs_handle
def watch_callback(path):
random_sleep()
return True
def random_sleep():
d = random.randint(-50000, 500)
if d > 0:
time.sleep(d / 1000.0)
def main(argv = None):
if argv is None:
argv = sys.argv
stress()
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 | 6,433,003,891,580,499,000 | 21.31405 | 75 | 0.627407 | false |
MLnick/spark | examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py | 106 | 2082 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Logistic Regression With LBFGS Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
# $example on$
# Load and parse the data
def parsePoint(line):
values = [float(x) for x in line.split(' ')]
return LabeledPoint(values[0], values[1:])
data = sc.textFile("data/mllib/sample_svm_data.txt")
parsedData = data.map(parsePoint)
# Build the model
model = LogisticRegressionWithLBFGS.train(parsedData)
# Evaluating the model on training data
labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
trainErr = labelsAndPreds.filter(lambda lp: lp[0] != lp[1]).count() / float(parsedData.count())
print("Training Error = " + str(trainErr))
# Save and load model
model.save(sc, "target/tmp/pythonLogisticRegressionWithLBFGSModel")
sameModel = LogisticRegressionModel.load(sc,
"target/tmp/pythonLogisticRegressionWithLBFGSModel")
# $example off$
| apache-2.0 | 4,443,358,212,936,988,000 | 37.555556 | 99 | 0.723823 | false |
yaukwankiu/armor | __init__pre_20140313.py | 1 | 2017 | """
I am completely rewriting this framework which was formerly known as weatherPattern. Yau Kwan Kiu, 802 CERB, NTU, 23-1-2013.
== Requirements ==
* python 2.7 or similar (python 2.5 will probably be okay, no python 3 please)
* numpy and scipy
* no sympy needed yet
* no opencv yet
== What's this? ==
ARMOR = Adjustment of Rainfall from MOdels using Radar, from WEather DEcision Technologies Inc, USA, based on the papers of [DuFran et al 2009], which builds on MAPLE (McGill Algorithm for Prediction by Lagrangian Extrapolation) based on [Germann, Zawadzki and Turner, 2001-2005] - see our 2012 Annual report to the Central Weather Bureau Taiwan for reference and details
This is our integrated ARMOR testing platform written in python. We shall develop and test our algorithms together here.
== Philosophy ==
* python as a glue, C or CUDA as the sword if need be
* keep it simple in design and use
* cross platform - at least running on pc and linux
== Design ==
* data structure:
** new style class (for future flexibility) armor.pattern.DBZ, (and other patterns for the future!), wrapping numpy.ma.MaskedArray [ http://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#the-maskedarray-class ], with identifying parameters (name, time, etc), parameters for I/O (path for input/output, params for screen display, etc), as well as simple I/O and standard methods for data processing and analysis
** module package operations acting on armor.Pattern: armor.advection, armor.shiiba, armor.wavelet, armor.hht, armor.kmeans, armor.hmm, armor.morphology
** try to be as python3 friendly as possible
== Roadmap ==
* speeding up with CUDA
* integration with QGIS
* incorporating tools from opencv and sympy, such as SIFT/SURF, symbolic manipulations, etc
You can see the above with
import armor
help(armor)
...............Cheers, YKK 23-1-2013..............
"""
__all__ = ['pattern', 'advection', 'basicio', 'fft', 'hht', 'hmm', 'kmeans', 'morphology', 'shiiba', 'wavelet']
test_attr = 'haha!'
| cc0-1.0 | 6,770,884,905,230,082,000 | 44.840909 | 423 | 0.737234 | false |
boltnev/iktomi | tests/db/sqla/__init__.py | 4 | 2080 | import unittest
from sqlalchemy.exc import UnboundExecutionError
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func
from iktomi.db.sqla import multidb_binds
from . import multidb_models
from .multidb_models import db1, db2
class MultidbTest(unittest.TestCase):
def setUp(self):
binds = multidb_binds({'db1': 'sqlite://', 'db2': 'sqlite://'},
package=multidb_models)
self.db = sessionmaker(binds=binds)()
db1.metadata.create_all(bind=self.db.get_bind(db1.SameName))
        db2.metadata.create_all(bind=self.db.get_bind(db2.SameName))
def test_get_bind(self):
with self.assertRaises(UnboundExecutionError):
# Insure it's not bound to single engine
self.db.get_bind()
engine_common1 = self.db.get_bind(db1.SameName)
engine_common2 = self.db.get_bind(db2.SameName)
self.assertIsNot(engine_common1, engine_common2)
engine_different1 = self.db.get_bind(db1.DifferentName1)
self.assertIs(engine_common1, engine_different1)
engine_different2 = self.db.get_bind(db2.DifferentName2)
self.assertIs(engine_common2, engine_different2)
def test_missing_metadata(self):
with self.assertRaises(ImportError):
multidb_binds({'db1': 'sqlite://',
'db2': 'sqlite://',
'db3': 'sqlite://'},
package=multidb_models)
def test_query_class(self):
try:
self.db.query(db1.SameName).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
def test_query_attr(self):
try:
self.db.query(db1.SameName.id).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
def test_query_func(self):
try:
self.db.query(func.max(db1.SameName.id)).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
| mit | -6,861,245,766,303,229,000 | 37.518519 | 71 | 0.622115 | false |
Voluntarynet/BitmessageKit | BitmessageKit/Vendor/static-python/Lib/traceback.py | 67 | 11255 | """Extract, format and print information about Python stack traces."""
import linecache
import sys
import types
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'tb_lineno']
def _print(file, str='', terminator='\n'):
file.write(str+terminator)
def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for filename, lineno, name, line in extracted_list:
_print(file,
' File "%s", line %d, in %s' % (filename,lineno,name))
if line:
_print(file, ' %s' % line.strip())
def format_list(extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
"""
list = []
for filename, lineno, name, line in extracted_list:
item = ' File "%s", line %d, in %s\n' % (filename,lineno,name)
if line:
item = item + ' %s\n' % line.strip()
list.append(item)
return list
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
_print(file,
' File "%s", line %d, in %s' % (filename, lineno, name))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: _print(file, ' ' + line.strip())
tb = tb.tb_next
n = n+1
def format_tb(tb, limit = None):
"""A shorthand for 'format_list(extract_stack(f, limit))."""
return format_list(extract_tb(tb, limit))
def extract_tb(tb, limit = None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
tb = tb.tb_next
n = n+1
return list
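# Illustrative shape of one extracted entry (the values below are made up,
# not taken from a real traceback):
#
#     ('spam.py', 3, 'eggs', 'return 1 / 0')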
def print_exception(etype, value, tb, limit=None, file=None):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
if file is None:
file = sys.stderr
if tb:
_print(file, 'Traceback (most recent call last):')
print_tb(tb, limit, file)
lines = format_exception_only(etype, value)
for line in lines:
_print(file, line, '')
def format_exception(etype, value, tb, limit = None):
"""Format a stack trace and the exception information.
The arguments have the same meaning as the corresponding arguments
to print_exception(). The return value is a list of strings, each
ending in a newline and some containing internal newlines. When
these lines are concatenated and printed, exactly the same text is
printed as does print_exception().
"""
if tb:
list = ['Traceback (most recent call last):\n']
list = list + format_tb(tb, limit)
else:
list = []
list = list + format_exception_only(etype, value)
return list
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would raise another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
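# A minimal illustrative call (Python 2 semantics; the exception and
# message are arbitrary sample values):
#
#     >>> format_exception_only(ValueError, ValueError('bad value'))
#     ['ValueError: bad value\n']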
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except Exception:
pass
try:
value = unicode(value)
return value.encode("ascii", "backslashreplace")
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None):
"""Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
(In fact, it uses sys.exc_info() to retrieve the same information
in a thread-safe way.)"""
if file is None:
file = sys.stderr
try:
etype, value, tb = sys.exc_info()
print_exception(etype, value, tb, limit, file)
finally:
etype = value = tb = None
def format_exc(limit=None):
"""Like print_exc() but return a string."""
try:
etype, value, tb = sys.exc_info()
return ''.join(format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
def print_last(limit=None, file=None):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
if file is None:
file = sys.stderr
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file)
def print_stack(f=None, limit=None, file=None):
"""Print a stack trace from its invocation point.
The optional 'f' argument can be used to specify an alternate
stack frame at which to start. The optional 'limit' and 'file'
arguments have the same meaning as for print_exception().
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
print_list(extract_stack(f, limit), file)
def format_stack(f=None, limit=None):
"""Shorthand for 'format_list(extract_stack(f, limit))'."""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
return format_list(extract_stack(f, limit))
def extract_stack(f=None, limit = None):
"""Extract the raw traceback from the current stack frame.
The return value has the same format as for extract_tb(). The
optional 'f' and 'limit' arguments have the same meaning as for
print_stack(). Each item in the list is a quadruple (filename,
line number, function name, text), and the entries are in order
from oldest to newest stack frame.
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
f = f.f_back
n = n+1
list.reverse()
return list
def tb_lineno(tb):
"""Calculate correct line number of traceback given in tb.
Obsolete in 2.3.
"""
return tb.tb_lineno
| mit | -7,451,505,135,514,413,000 | 34.282132 | 100 | 0.626033 | false |
deepsrijit1105/edx-platform | openedx/core/djangoapps/credit/admin.py | 27 | 2460 | """
Django admin page for credit eligibility
"""
from ratelimitbackend import admin
from openedx.core.djangoapps.credit.models import (
CreditConfig, CreditCourse, CreditProvider, CreditEligibility, CreditRequest, CreditRequirement,
CreditRequirementStatus
)
class CreditCourseAdmin(admin.ModelAdmin):
"""Admin for credit courses. """
list_display = ('course_key', 'enabled',)
list_filter = ('enabled',)
search_fields = ('course_key',)
class Meta(object):
model = CreditCourse
class CreditProviderAdmin(admin.ModelAdmin):
"""Admin for credit providers. """
list_display = ('provider_id', 'display_name', 'active',)
list_filter = ('active',)
search_fields = ('provider_id', 'display_name')
class Meta(object):
model = CreditProvider
class CreditEligibilityAdmin(admin.ModelAdmin):
"""Admin for credit eligibility. """
list_display = ('course', 'username', 'deadline')
search_fields = ('username', 'course__course_key')
class Meta(object):
model = CreditEligibility
class CreditRequestAdmin(admin.ModelAdmin):
"""Admin for credit requests. """
list_display = ('provider', 'course', 'status', 'username')
list_filter = ('provider', 'status',)
readonly_fields = ('uuid',)
search_fields = ('uuid', 'username', 'course__course_key', 'provider__provider_id')
class Meta(object):
model = CreditRequest
class CreditRequirementAdmin(admin.ModelAdmin):
""" Admin for CreditRequirement. """
list_display = ('course', 'namespace', 'name', 'display_name', 'active',)
list_filter = ('active', 'namespace',)
search_fields = ('course__course_key', 'namespace', 'name',)
class Meta(object):
model = CreditRequirement
class CreditRequirementStatusAdmin(admin.ModelAdmin):
""" Admin for CreditRequirementStatus. """
list_display = ('username', 'requirement', 'status',)
search_fields = ('username', 'requirement__course__course_key',)
class Meta(object):
model = CreditRequirementStatus
admin.site.register(CreditCourse, CreditCourseAdmin)
admin.site.register(CreditProvider, CreditProviderAdmin)
admin.site.register(CreditEligibility, CreditEligibilityAdmin)
admin.site.register(CreditRequest, CreditRequestAdmin)
admin.site.register(CreditConfig)
admin.site.register(CreditRequirement, CreditRequirementAdmin)
admin.site.register(CreditRequirementStatus, CreditRequirementStatusAdmin)
| agpl-3.0 | -5,727,921,518,648,047,000 | 30.948052 | 100 | 0.703659 | false |
statgen/encore | encore/tests/api_tests.py | 1 | 1036 | import pytest
import flask_login
from encore import create_app
from encore.user import User
@pytest.fixture(scope="module")
def app(request):
app = create_app()
ctx = app.app_context()
ctx.push()
request.addfinalizer(ctx.pop)
return app
@pytest.fixture(scope="module")
def test_client(request, app):
client = app.test_client()
client.__enter__()
request.addfinalizer(lambda: client.__exit__(None, None, None))
return client
@pytest.fixture(scope="module")
def test_client_user(request, app):
client = app.test_client()
client.__enter__()
with client.session_transaction() as sess:
sess["user_id"] = "[email protected]"
sess["_fresh"] = True
request.addfinalizer(lambda: client.__exit__(None, None, None))
return client
def test_home_anon(test_client):
rv = test_client.get("/")
assert b'please sign in' in rv.data
assert rv.status_code == 200
def test_home_user(test_client_user):
    rv = test_client_user.get("/")
assert b'Welcome' in rv.data
| agpl-3.0 | -1,688,377,205,503,942,100 | 26.263158 | 67 | 0.662162 | false |
williamfeng323/py-web | flask/lib/python3.6/site-packages/psycopg2/psycopg1.py | 8 | 3339 | """psycopg 1.1.x compatibility module
This module uses the new style connection and cursor types to build a psycopg
1.1.1.x compatibility layer. It should be considered a temporary hack to run
old code while porting to psycopg 2. Import it as follows::
from psycopg2 import psycopg1 as psycopg
"""
# psycopg/psycopg1.py - psycopg 1.1.x compatibility module
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
from psycopg2 import _psycopg as _2psycopg # noqa
from psycopg2.extensions import cursor as _2cursor
from psycopg2.extensions import connection as _2connection
from psycopg2 import * # noqa
from psycopg2 import extensions as _ext
_2connect = connect
def connect(*args, **kwargs):
"""connect(dsn, ...) -> new psycopg 1.1.x compatible connection object"""
kwargs['connection_factory'] = connection
conn = _2connect(*args, **kwargs)
conn.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
return conn
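# A minimal usage sketch (assumes a reachable PostgreSQL server; the
# 'mydb' DSN below is a hypothetical placeholder):
#
#     from psycopg2 import psycopg1 as psycopg
#     conn = psycopg.connect("dbname=mydb")
#     curs = conn.cursor()
#     curs.execute("SELECT 1 AS x")
#     curs.dictfetchone()        # -> {'x': 1}
#     conn.autocommit(1)         # psycopg 1.1.x-style autocommit switch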
class connection(_2connection):
"""psycopg 1.1.x connection."""
def cursor(self):
"""cursor() -> new psycopg 1.1.x compatible cursor object"""
return _2connection.cursor(self, cursor_factory=cursor)
def autocommit(self, on_off=1):
"""autocommit(on_off=1) -> switch autocommit on (1) or off (0)"""
if on_off > 0:
self.set_isolation_level(_ext.ISOLATION_LEVEL_AUTOCOMMIT)
else:
self.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
class cursor(_2cursor):
"""psycopg 1.1.x cursor.
Note that this cursor implements the exact procedure used by psycopg 1 to
build dictionaries out of result rows. The DictCursor in the
psycopg.extras modules implements a much better and faster algorithm.
"""
def __build_dict(self, row):
res = {}
for i in range(len(self.description)):
res[self.description[i][0]] = row[i]
return res
def dictfetchone(self):
row = _2cursor.fetchone(self)
if row:
return self.__build_dict(row)
else:
return row
def dictfetchmany(self, size):
res = []
rows = _2cursor.fetchmany(self, size)
for row in rows:
res.append(self.__build_dict(row))
return res
def dictfetchall(self):
res = []
rows = _2cursor.fetchall(self)
for row in rows:
res.append(self.__build_dict(row))
return res
| mit | -1,886,374,107,592,166,100 | 33.78125 | 77 | 0.684636 | false |
aeron15/ruffus | ruffus/test/test_transform_with_no_re_matches.py | 5 | 3227 | #!/usr/bin/env python
from __future__ import print_function
"""
test_transform_with_no_re_matches.py
test messages with no regular expression matches
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
from ruffus import transform, regex, pipeline_run, Pipeline, originate, mkdir
import ruffus
print(" Ruffus Version = ", ruffus.__version__)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@mkdir(tempdir)
@originate(tempdir + "a")
def task_1 (o):
open(o, 'w').close()
@transform(task_1, regex("b"), "task_2.output")
def task_2 (i, o):
for f in o:
with open(f, 'w') as oo:
pass
import unittest
class t_save_to_str_logger:
"""
Everything to stderr
"""
def __init__ (self):
self.info_str = ""
self.warning_str = ""
self.debug_str = ""
def info (self, message):
self.info_str += message
def warning (self, message):
self.warning_str += message
def debug (self, message):
self.debug_str += message
class Test_task_mkdir(unittest.TestCase):
def setUp (self):
"""
"""
pass
def tearDown (self):
"""
"""
for d in ['a']:
fullpath = os.path.join(os.path.dirname(__file__), tempdir + d)
os.unlink(fullpath)
os.rmdir(tempdir)
def test_no_re_match (self):
save_to_str_logger = t_save_to_str_logger()
pipeline_run(multiprocess = 10, logger = save_to_str_logger, verbose = 1, pipeline= "main")
print(save_to_str_logger.warning_str)
self.assertTrue("no file names matched" in save_to_str_logger.warning_str)
print("\n Warning printed out correctly", file=sys.stderr)
def test_newstyle_no_re_match (self):
test_pipeline = Pipeline("test")
test_pipeline.originate(task_1, tempdir + "a").mkdir(tempdir)
test_pipeline.transform(task_2, task_1, regex("b"), "task_2.output")
save_to_str_logger = t_save_to_str_logger()
test_pipeline.run(multiprocess = 10, logger = save_to_str_logger, verbose = 1)
print(save_to_str_logger.warning_str)
self.assertTrue("no file names matched" in save_to_str_logger.warning_str)
print("\n Warning printed out correctly", file=sys.stderr)
if __name__ == '__main__':
unittest.main()
| mit | -3,462,820,483,330,502,700 | 24.816 | 99 | 0.669352 | false |
jaingaurav/Diamond | src/collectors/cephstats/cephstats.py | 26 | 1581 | # coding=utf-8
"""
Get ceph status from one node
"""
import subprocess
import re
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'ceph'))
from ceph import CephCollector
patternchk = re.compile(r'\bclient io .*')
numberchk = re.compile(r'\d+')
# This is external to the CephCollector so it can be tested
# separately.
def process_ceph_status(output):
res = patternchk.search(output)
if not res:
return {}
ceph_stats = res.group()
if not ceph_stats:
return {}
ret = {}
rd = wr = iops = None
rd = numberchk.search(ceph_stats)
if rd is not None:
ret['rd'] = rd.group()
wr = numberchk.search(ceph_stats, rd.end())
if wr is not None:
ret['wr'] = wr.group()
iops = numberchk.search(ceph_stats, wr.end())
if iops is not None:
ret['iops'] = iops.group()
return ret
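# Illustrative input/output (the numbers are invented; a real `ceph -s`
# dump contains a "client io ..." line of roughly this shape):
#
#     process_ceph_status('client io 1000 B/s rd, 2000 B/s wr, 30 op/s')
#     # -> {'rd': '1000', 'wr': '2000', 'iops': '30'} (key order may vary)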
class CephStatsCollector(CephCollector):
def _get_stats(self):
"""
Get ceph stats
"""
try:
output = subprocess.check_output(['ceph', '-s'])
except subprocess.CalledProcessError, err:
self.log.info(
'Could not get stats: %s' % err)
self.log.exception('Could not get stats')
return {}
return process_ceph_status(output)
def collect(self):
"""
Collect ceph stats
"""
stats = self._get_stats()
self._publish_stats('cephstats', stats)
return
| mit | -4,525,459,448,617,787,000 | 23.323077 | 75 | 0.555977 | false |
tersmitten/ansible | lib/ansible/plugins/shell/powershell.py | 30 | 12111 | # Copyright (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: powershell
plugin_type: shell
version_added: historical
short_description: Windows PowerShell
description:
- The only option when using 'winrm' or 'psrp' as a connection plugin.
- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
extends_documentation_fragment:
- shell_windows
'''
import base64
import os
import re
import shlex
import pkgutil
import xml.etree.ElementTree as ET
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.shell import ShellBase
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
_common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
def _parse_clixml(data, stream="Error"):
"""
Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
message encoded in the XML data. CLIXML is used by PowerShell to encode
multiple objects in stderr.
"""
clixml = ET.fromstring(data.split(b"\r\n", 1)[-1])
namespace_match = re.match(r'{(.*)}', clixml.tag)
namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
strings = clixml.findall("./%sS" % namespace)
lines = [e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream]
return to_bytes('\r\n'.join(lines))
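# A hand-written, trimmed-down CLIXML document for illustration (not
# captured PowerShell output):
#
#     data = (b'#< CLIXML\r\n'
#             b'<Objs Version="1.1.0.1" '
#             b'xmlns="http://schemas.microsoft.com/powershell/2004/04">'
#             b'<S S="Error">Oops_x000D__x000A_</S></Objs>')
#     _parse_clixml(data)   # -> b'Oops'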
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
env = dict()
# We're being overly cautious about which keys to accept (more so than
# the Windows environment is capable of doing), since the powershell
# env provider's limitations don't appear to be documented.
safe_envkey = re.compile(r'^[\d\w_]{1,255}$')
# TODO: add binary module support
def assert_safe_env_key(self, key):
if not self.safe_envkey.match(key):
raise AnsibleError("Invalid PowerShell environment key: %s" % key)
return key
def safe_env_value(self, key, value):
if len(value) > 32767:
raise AnsibleError("PowerShell environment value for key '%s' exceeds 32767 characters in length" % key)
# powershell single quoted literals need single-quote doubling as their only escaping
value = value.replace("'", "''")
return to_text(value, errors='surrogate_or_strict')
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
parts = []
for arg in args:
arg = self._unquote(arg).replace('/', '\\')
parts.extend([a for a in arg.split('\\') if a])
path = '\\'.join(parts)
        # '~'-prefixed paths get no special treatment here; tilde
        # expansion is handled separately (see expand_user below)
        return path
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
basefile = self._escape(self._unquote(basefile))
basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
script = '''
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
Write-Output -InputObject $tmp.FullName
''' % (basetmpdir, basefile)
return self._encode_script(script.strip())
def expand_user(self, user_home_path, username=''):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = 'Write-Output ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
else:
script = 'Write-Output "%s"' % self._escape(user_home_path)
return self._encode_script(script)
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path "%s")
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output "$res";
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
# we're running a module via the bootstrap wrapper
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s; exit $LASTEXITCODE' % cmd
def _unquote(self, value):
'''Remove any matching quotes that wrap the given value.'''
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
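    # Illustrative results (arbitrary sample values):
    #
    #     _unquote("'C:\\temp'")   # -> 'C:\\temp'
    #     _unquote('"hello"')      # -> 'hello'
    #     _unquote('plain')        # -> 'plain'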
def _escape(self, value, include_vars=False):
'''Return value escaped for use in PowerShell command.'''
# http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
# http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
('\'', '`\''), ('`', '``'), ('\x00', '`0')]
if include_vars:
subs.append(('$', '`$'))
pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
substs = [s for p, s in subs]
def replace(m):
return substs[m.lastindex - 1]
return re.sub(pattern, replace, value)
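    # Illustrative escapes (arbitrary sample values):
    #
    #     _escape('say "hi"')                      # -> 'say `"hi`"'
    #     _escape('$env:PATH', include_vars=True)  # -> '`$env:PATH'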
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = to_text(script)
if script == u'-':
cmd_parts = _common_args + ['-Command', '-']
else:
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
% script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
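    # Sketch of the returned command, assuming POWERSHELL_VERSION is unset
    # (the base64 payload, elided here, is the UTF-16-LE encoding of the
    # wrapped script):
    #
    #     _encode_script('Write-Output hi', as_list=True)
    #     # -> ['PowerShell', '-NoProfile', '-NonInteractive',
    #     #     '-ExecutionPolicy', 'Unrestricted',
    #     #     '-EncodedCommand', '<base64>']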
| gpl-3.0 | -2,502,365,957,732,336,000 | 37.942122 | 155 | 0.570721 | false |
Kovath/league-announcer | log_analyzer.py | 1 | 1527 | #!/usr/bin/env python
# LOG ANALYZER
# by Kevin Yang
#
# Assumes VERY MUCH THIS FORMAT:
# [<time>] <event> -> <data>
import sys, re
import numpy as np
def find_outliers(array, mean=None, std=None, m=6):
	if mean is None:
		mean = np.mean(array)
	if std is None:
		std = np.std(array)
return array[abs(array - mean) >= m * std]
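# Illustrative example with made-up numbers (mean ~= 26.5, std ~= 42.4,
# so with m=1 only 100 deviates by at least one standard deviation):
#
#     find_outliers(np.array([1, 2, 3, 100]), m=1)   # -> array([100])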
if __name__ == "__main__":
log_file = open(sys.argv[1])
regex = re.compile(r"\[(?P<time>\d+)\] (?P<event>\w*) -> (?P<data>.*)")
events = []
for line in log_file:
match = re.search(regex, line)
if match == None:
print("error parsing line: " + line)
continue
event = {
"time" : match.group("time"),
"event" : match.group("event"),
"data" : eval(match.group("data")),
}
events.append(event)
if len(events) <= 0:
exit()
data_query = dict([(key, []) for key in events[0]["data"].keys()])
for event in events:
for key in data_query.keys():
data_query[key].append(event["data"][key])
for query, data in data_query.items():
data_query[query] = { "data" : np.array(data) }
# calculations
for query, stats in data_query.items():
data = stats["data"]
stats["median"] = np.median(data)
stats["mean"] = np.mean(data)
stats["min"] = np.min(data)
stats["max"] = np.max(data)
stats["std"] = np.std(data)
stats["outliers"] = find_outliers(data, stats["mean"], stats["std"])
# output
for query, stats in data_query.items():
print(query + ":")
for stat, value in stats.items():
print("\t%s: %s" % (stat, str(value)))
print("") | mit | -2,527,666,009,925,682,000 | 21.470588 | 72 | 0.59594 | false |
40123148/40123148 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_case.py | 738 | 51689 | import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
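        # A hedged summary of the rule these three checks pin down (inferred
        # from the assertions, not from any documented private API): with
        # maxDiff = N, _truncateMessage('foo', diff) returns 'foo' + diff
        # whenever N is None or len(diff) <= N, and otherwise returns
        # 'foo' + DIFF_OMITTED % len(diff).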
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogenous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
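        # Reading the diff tuples asserted above (inferred from the expected
        # values, not from documented API): each tuple is (count in the first
        # sequence, count in the second sequence, element), so (3, 1, 'a')
        # records three 'a' in 'aaabccd' against one in 'abbbcce'.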
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
# no fair testing ourself with ourself, and assertEqual is used for strings
# so can't use assertEqual either. Just use assertTrue.
self.assertTrue(sample_text_error == error)
    def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
        # Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ state is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
| lgpl-3.0 | -749,433,752,865,654,800 | 38.307224 | 87 | 0.595794 | false |
gangadharkadam/v5_frappe | frappe/utils/data.py | 4 | 17064 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import operator
import re, urllib, datetime, math
import babel.dates
# datetime functions
def getdate(string_date):
"""
	Converts string date (yyyy-mm-dd) to datetime.date object
"""
if isinstance(string_date, datetime.date):
return string_date
elif isinstance(string_date, datetime.datetime):
return string_date.date()
if " " in string_date:
string_date = string_date.split(" ")[0]
return datetime.datetime.strptime(string_date, "%Y-%m-%d").date()
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
format = isinstance(date, basestring)
if date:
date = getdate(date)
else:
raise Exception, "Start date required"
from dateutil.relativedelta import relativedelta
date += relativedelta(years=years, months=months, days=days)
if format:
return date.strftime("%Y-%m-%d")
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)
def now_datetime():
return convert_utc_to_user_timezone(datetime.datetime.utcnow())
def get_user_time_zone():
if getattr(frappe.local, "user_time_zone", None) is None:
frappe.local.user_time_zone = frappe.cache().get_value("time_zone")
if not frappe.local.user_time_zone:
frappe.local.user_time_zone = frappe.db.get_default('time_zone') or 'Asia/Calcutta'
frappe.cache().set_value("time_zone", frappe.local.user_time_zone)
return frappe.local.user_time_zone
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_user_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(frappe.local, "current_date", None):
return getdate(frappe.local.current_date).strftime("%Y-%m-%d") + " " + \
now_datetime().strftime('%H:%M:%S.%f')
else:
return now_datetime().strftime('%Y-%m-%d %H:%M:%S.%f')
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime('%Y-%m-%d')
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime('%H:%M:%S.%f')
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
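# Illustrative sketch (not part of the original module): how the divmod
# month-overflow arithmetic above works out; values traced from the code.
#
#     get_first_day("2012-01-31")        -> datetime.date(2012, 1, 1)
#     get_first_day("2012-12-15", 0, 1)  -> datetime.date(2013, 1, 1)   # year rolls over
#     get_last_day("2012-02-10")         -> datetime.date(2012, 2, 29)  # leap year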
def get_datetime(datetime_str):
try:
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S.%f')
except TypeError:
if isinstance(datetime_str, datetime.datetime):
return datetime_str.replace(tzinfo=None)
else:
raise
except ValueError:
if datetime_str=='0000-00-00 00:00:00.000000':
return None
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
def formatdate(string_date=None, format_string=None):
"""
	Converts the given string date to :data:`user_format`
User format specified in defaults
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
date = getdate(string_date) if string_date else now_datetime().date()
if format_string:
return babel.dates.format_date(date, format_string or "medium", locale=(frappe.local.lang or "").replace("-", "_"))
else:
if getattr(frappe.local, "user_format", None) is None:
frappe.local.user_format = frappe.db.get_default("date_format")
out = frappe.local.user_format or "yyyy-mm-dd"
try:
return out.replace("dd", date.strftime("%d"))\
.replace("mm", date.strftime("%m"))\
.replace("yyyy", date.strftime("%Y"))
except ValueError, e:
raise frappe.ValidationError, str(e)
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = rounded(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except: num = 0
return num
def cstr(s):
if isinstance(s, unicode):
return s
elif s==None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def rounded(num, precision=0):
"""round method for round halfs to nearest even algorithm"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
floor = math.floor(num)
decimal_part = num - floor
if decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
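# Illustrative sketch (not part of the original module): round-half-to-even in
# practice; outputs traced from the implementation above.
#
#     rounded(0.5)       -> 0.0   # tie goes to the even neighbour
#     rounded(1.5)       -> 2.0
#     rounded(2.5)       -> 2.0
#     rounded(2.675, 2)  -> 2.68  # 267.5 -> 268.0 after the 10**2 multiplier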
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = None
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format")
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
if precision is None:
precision = number_format_precision
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = number_format=="#,##,###.##" and 2 or 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
amount = minus + amount
if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
amount = symbol + " " + amount
return amount
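# Illustrative sketch (not part of the original module): fmt_money needs a
# live site (it reads Currency records and defaults), so these values are
# worked by hand from the grouping loop above for the stated formats.
#
#     "#,###.##"    : fmt_money(12345678.9) -> "12,345,678.90"
#     "#,##,###.##" : fmt_money(12345678.9) -> "1,23,45,678.90"   # val == 2 branch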
number_format_info = {
"#,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"# ###,##": (",", " ", 2),
"#'###.##": (".", "'", 2),
"#, ###.##": (".", ", ", 2),
"#,##,###.##": (".", ",", 2),
"#,###.###": (".", ",", 3),
"#.###": ("", ".", 0),
"#,###": ("", ",", 0)
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
from frappe.utils import get_defaults
_ = frappe._
if not number or flt(number) < 0:
return ""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or _("Cent")
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = frappe.db.get_value("Currency", main_currency, "number_format") or \
frappe.db.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' ' + _('and') + ' ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' ' + _('only.')
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
_ = frappe._
n=int(integer)
known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
def psn(n, known, xpsn):
import sys;
if n in known: return known[n]
bestguess, remainder = str(n), 0
if n<=20:
frappe.errprint(sys.stderr)
frappe.errprint(n)
frappe.errprint("How did this happen?")
assert 0
elif n < 100:
bestguess= xpsn((n//10)*10, known, xpsn) + '-' + xpsn(n%10, known, xpsn)
return bestguess
elif n < 1000:
bestguess= xpsn(n//100, known, xpsn) + ' ' + _('hundred')
remainder = n%100
else:
if in_million:
if n < 1000000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + _('thousand')
remainder = n%1000
elif n < 1000000000:
bestguess= xpsn(n//1000000, known, xpsn) + ' ' + _('million')
remainder = n%1000000
else:
bestguess= xpsn(n//1000000000, known, xpsn) + ' ' + _('billion')
remainder = n%1000000000
else:
if n < 100000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + _('thousand')
remainder = n%1000
elif n < 10000000:
bestguess= xpsn(n//100000, known, xpsn) + ' ' + _('lakh')
remainder = n%100000
else:
bestguess= xpsn(n//10000000, known, xpsn) + ' ' + _('crore')
remainder = n%10000000
if remainder:
if remainder >= 100:
comma = ','
else:
comma = ''
return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
else:
return bestguess
return psn(n, known, psn)
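# Illustrative sketch (not part of the original module): outputs traced from
# psn() above with the default in_million=True. Note the comma inserted when
# the remainder is 100 or more.
#
#     in_words(42)       -> "forty-two"
#     in_words(1001)     -> "one thousand one"
#     in_words(2500000)  -> "two million, five hundred thousand"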
def is_html(text):
out = False
for key in ["<br>", "<p", "<img", "<div"]:
if key in text:
out = True
break
return out
# from Jinja2 code
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def strip_html(text):
"""removes anything enclosed in and including <>"""
return _striptags_re.sub("", text)
def escape_html(text):
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
return "".join(html_escape_table.get(c,c) for c in text)
def pretty_date(iso_datetime):
"""
	Takes an ISO time and returns a string representing how
	long ago that time was.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S.%f')
now_dt = datetime.datetime.strptime(now(), '%Y-%m-%d %H:%M:%S.%f')
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
	# different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
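# Illustrative sketch (not part of the original module): sample buckets,
# assuming the current time is fixed.
#
#     30 seconds ago -> 'just now'
#     90 seconds ago -> '1 minute ago'
#     5 hours ago    -> '5 hours ago'
#     1 day ago      -> 'Yesterday'
#     10 days ago    -> '2 week(s) ago'   # ceil(10 / 7.0)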
def comma_or(some_list):
return comma_sep(some_list, " or ")
def comma_and(some_list):
return comma_sep(some_list, " and ")
def comma_sep(some_list, sep):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return ", ".join(some_list[:-1]) + sep + some_list[-1]
else:
return some_list
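# Illustrative sketch (not part of the original module):
#
#     comma_and(['a', 'b', 'c']) -> "'a', 'b' and 'c'"
#     comma_or(['x', 'y'])       -> "'x' or 'y'"
#     comma_and(['solo'])        -> 'solo'   # single items come back unquoted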
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_url(uri=None, full_address=False):
"""get app url from request"""
host_name = frappe.local.conf.host_name
if not host_name:
if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
protocol = 'https' == frappe.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
host_name = protocol + frappe.local.request.host
elif frappe.local.site:
host_name = "http://{}".format(frappe.local.site)
else:
host_name = frappe.db.get_value("Website Settings", "Website Settings",
"subdomain")
if host_name and "http" not in host_name:
host_name = "http://" + host_name
if not host_name:
host_name = "http://localhost"
if not uri and full_address:
uri = frappe.get_request_header("REQUEST_URI", "")
url = urllib.basejoin(host_name, uri) if uri else host_name
return url
def get_host_name():
return get_url().rsplit("//", 1)[-1]
def get_url_to_form(doctype, name, label=None):
if not label: label = name
return """<a href="/desk#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
ret = False
if condition in operator_map:
ret = operator_map[condition]((val1, val2))
return ret
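# Illustrative sketch (not part of the original module):
#
#     compare(5, '>', 3)            -> True
#     compare('foobar', '^', 'foo') -> True    # startswith
#     compare(2, 'in', [1, 2, 3])   -> True
#     compare(1, '~', 2)            -> False   # unknown operators fall through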
def scrub_urls(html):
html = expand_relative_urls(html)
html = quote_urls(html)
return html
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
return "".join(to_expand)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
def quote_urls(html):
def _quote_url(match):
groups = list(match.groups())
groups[2] = urllib.quote(groups[2].encode("utf-8"), safe=b"~@#$&()*!+=:;,.?/'").decode("utf-8")
return "".join(groups)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)',
_quote_url, html)
def unique(seq):
"""use this instead of list(set()) to preserve order of the original list.
Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x)) ]
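# Illustrative sketch (not part of the original module):
#
#     unique([3, 1, 3, 2, 1]) -> [3, 1, 2]   # first-occurrence order is kept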
| mit | -9,066,791,448,070,147,000 | 27.158416 | 145 | 0.643108 | false |
cloudify-cosmo/cloudify-diamond-plugin | diamond_agent/tests/test_single_node.py | 1 | 7362 | import os
import time
import tempfile
import pickle as cPickle
import mock
from testtools import TestCase
from cloudify.workflows import local
from cloudify.decorators import operation
from diamond_agent import tasks
from diamond_agent.tests import IGNORED_LOCAL_WORKFLOW_MODULES
class TestSingleNode(TestCase):
def setUp(self):
super(TestSingleNode, self).setUp()
os.environ['MANAGEMENT_IP'] = '127.0.0.1'
self.is_uninstallable = True
self.env = None
self._original_get_agent_name = tasks._get_agent_name
tasks._get_agent_name = mock.MagicMock(return_value='agent_name')
self.addCleanup(self._unmock_agent_name)
def tearDown(self):
super(TestSingleNode, self).tearDown()
if self.env and self.is_uninstallable:
self.env.execute('uninstall', task_retries=0)
# custom handler + custom collector
def test_custom_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
}
}
}
},
'collectors_config': {
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_cloudify_handler_format(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
'output_cloudify_format': True,
}
}
}
},
'collectors_config': {
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
# custom handler + no collector
# diamond should run without outputting anything
def test_no_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
},
}
}
},
'collectors_config': {}
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_uninstall_workflow(self):
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'diamond.handler.archive.ArchiveHandler': {
'config': {
'log_file': tempfile.mktemp(),
}
}
}
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_no_handlers(self):
inputs = {
'diamond_config': {
'handlers': {},
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_restart_plugin_script(self):
"""A script that restarts diamond doesn't interfere with the plugin.
If the add_collectors tasks run in parallel with a script that
also happens to restart diamond, there's a race condition between them
looking up the process by the PID, making one of them to break.
"""
blueprint_yaml = self._get_resource_path('blueprint',
'restart_diamond_script.yaml')
self.is_uninstallable = False
local_env = local.init_env(
blueprint_yaml, ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
self.addCleanup(local_env.execute, 'uninstall')
# this needs a threadpool size >1 so that the add_collectors task
# can run in parallel with the custom restart task
local_env.execute('install', task_thread_pool_size=5)
def _mock_get_paths(self, prefix):
return [
os.path.join(prefix, 'etc', tasks.CONFIG_NAME),
os.path.join(prefix, 'etc', 'collectors'),
os.path.join(prefix, 'collectors'),
os.path.join(prefix, 'etc', 'handlers'),
os.path.join(prefix, 'handlers')
]
def _create_env(self, inputs):
return local.init_env(self._blueprint_path(),
inputs=inputs,
ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
def _blueprint_path(self):
return self._get_resource_path('blueprint', 'single_node.yaml')
def _get_resource_path(self, *args):
return os.path.join(os.path.dirname(__file__), 'resources', *args)
def _unmock_agent_name(self):
tasks._get_agent_name = self._original_get_agent_name
def collector_in_log(path, collector):
with open(path, 'r') as fh:
try:
while True:
metric = cPickle.load(fh)
if metric.path.split('.')[3] == collector:
return True
except EOFError:
return False
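# A note on the split above (an assumption drawn from how these tests wire up
# Diamond, not from its documented API): metrics are expected to carry a
# dotted path along the lines of "<prefix>.<host>.<node>.<Collector>.<metric>",
# which makes index 3 the collector name.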
def is_created(path, timeout=5):
for _ in range(timeout):
if os.path.isfile(path):
return True
time.sleep(1)
return False
def get_ids(instances, name):
for instance in instances:
if instance['name'] == name:
return instance['host_id'], instance['node_id'], instance['id']
def get_pid(config):
pid_file = os.path.join(config['diamond_config']['prefix'],
'var', 'run', 'diamond.pid')
with open(pid_file, 'r') as pf:
pid = int(pf.read())
return pid
@operation
def sleep_and_restart_diamond(ctx):
"""Restart diamond 5 times, with 3 second pauses between restarts.
This is a task used in the TestSingleNode.test_restart_plugin_script test.
"""
ctx.logger.info('Foo')
| apache-2.0 | -4,313,866,447,422,166,500 | 31.72 | 79 | 0.502445 | false |
varunagrawal/azure-services | varunagrawal/site-packages/django/contrib/admindocs/views.py | 77 | 15064 | import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.object_name.lower() == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % field.name,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name' : "%s.all" % accessor,
'data_type' : 'List',
'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % accessor,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': _("Fields on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(mod.__file__))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
lib = template.get_library(library_name)
except template.InvalidTemplateLibrary, e:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
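# Illustrative sketch (not part of the original module): for a hypothetical
# urlconf such as
#   urlpatterns = patterns('', url(r'^about/$', about_view))
# the helper flattens nested patterns into (callback, full_regex) pairs:
#   extract_views_from_urlpatterns(urlpatterns)  # -> [(about_view, '^about/$')]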
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
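# Illustrative example (not part of the original module):
#   simplify_regex(r'^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$')
#   # -> '/<sport_slug>/athletes/<athlete_slug>/'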
| gpl-2.0 | -6,575,094,731,674,168,000 | 38.434555 | 180 | 0.594264 | false |
lasote/conan | conans/test/integration/profile_test.py | 1 | 16563 | import unittest
from conans.client import tools
from conans.test.utils.tools import TestClient
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.util.files import save, load
import os
from conans.paths import CONANFILE
from collections import OrderedDict
from conans.test.utils.test_files import temp_folder
from conans.test.utils.profiles import create_profile as _create_profile
from nose_parameterized import parameterized
conanfile_scope_env = """
import platform
from conans import ConanFile
class AConan(ConanFile):
name = "Hello0"
version = "0.1"
settings = "os", "compiler", "arch"
def build(self):
self.output.warn("Scope myscope: %s" % self.scope.myscope)
self.output.warn("Scope otherscope: %s" % self.scope.otherscope)
self.output.warn("Scope undefined: %s" % self.scope.undefined)
# Print environment vars
if self.settings.os == "Windows":
self.run("SET")
else:
self.run("env")
"""
def create_profile(folder, name, settings=None, scopes=None, package_settings=None, env=None,
package_env=None, options=None):
_create_profile(folder, name, settings, scopes, package_settings, env, package_env, options)
content = load(os.path.join(folder, name))
content = "include(default)\n \n" + content
save(os.path.join(folder, name), content)
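# Illustrative sketch (not part of the original tests): after create_profile,
# the profile written to disk starts with an include of the default profile,
# roughly:
#   include(default)
#
#   [settings]
#   compiler=gcc
# The exact body below the include line depends on what _create_profile emits.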
class ProfileTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
def bad_syntax_test(self):
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export lasote/stable")
profile = '''
[settings
'''
clang_profile_path = os.path.join(self.client.client_cache.profiles_path, "clang")
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
self.assertIn("Bad syntax", self.client.user_io.out)
profile = '''
[settings]
[invented]
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Unrecognized field 'invented'", self.client.user_io.out)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
profile = '''
[settings]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid setting line 'as'",
self.client.user_io.out)
profile = '''
[env]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid env line 'as'",
self.client.user_io.out)
profile = '''
[scopes]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Bad scope as", self.client.user_io.out)
profile = '''
[settings]
os = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
# stripped "a value"
self.assertIn("'a value' is not a valid 'settings.os'", self.client.user_io.out)
profile = '''
include(default)
[env]
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
profile = '''
include(default)
# Line with comments is not a problem
[env]
# Not even here
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
@parameterized.expand([("", ), ("./local_profiles/", ), (temp_folder() + "/", )])
def install_with_missing_profile_test(self, path):
self.client.save({CONANFILE: conanfile_scope_env})
error = self.client.run('install -pr "%sscopes_env"' % path, ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Profile not found:", self.client.out)
self.assertIn("scopes_env", self.client.out)
def install_profile_env_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
files["conanfile.py"] = conanfile_scope_env
create_profile(self.client.client_cache.profiles_path, "envs", settings={},
env=[("A_VAR", "A_VALUE")], package_env={"Hello0": [("OTHER_VAR", "2")]})
self.client.save(files)
self.client.run("export lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr envs")
self._assert_env_variable_printed("A_VAR", "A_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override with package var
self.client.run("install Hello0/0.1@lasote/stable --build "
"-pr envs -e Hello0:A_VAR=OTHER_VALUE")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override package var with package var
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=OTHER_VALUE -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "3")
# Pass a variable with "=" symbol
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=Valuewith=equal -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "Valuewith=equal")
self._assert_env_variable_printed("OTHER_VAR", "3")
def install_profile_settings_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
# Create a profile and use it
profile_settings = OrderedDict([("compiler", "Visual Studio"),
("compiler.version", "12"),
("compiler.runtime", "MD"),
("arch", "x86")])
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
settings=profile_settings, package_settings={})
tools.replace_in_file(self.client.client_cache.default_profile_path,
"compiler.libcxx", "#compiler.libcxx", strict=False)
self.client.save(files)
self.client.run("export lasote/stable")
self.client.run("install --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
self.assertIn("%s=%s" % (setting, value), info)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86 -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
if setting != "compiler.version":
self.assertIn("%s=%s" % (setting, value), info)
else:
self.assertIn("compiler.version=14", info)
# Use package settings in profile
tmp_settings = OrderedDict()
tmp_settings["compiler"] = "gcc"
tmp_settings["compiler.libcxx"] = "libstdc++11"
tmp_settings["compiler.version"] = "4.8"
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertIn("compiler.libcxx=libstdc++11", info)
# If other package is specified compiler is not modified
package_settings = {"NoExistsRecipe": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=Visual Studio", info)
self.assertNotIn("compiler.libcxx", info)
# Mix command line package settings with profile
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path, "vs_12_86_Hello0_gcc",
settings=profile_settings, package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc"
" -s compiler.version=14 -s Hello0:compiler.libcxx=libstdc++")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertNotIn("compiler.libcxx=libstdc++11", info)
self.assertIn("compiler.libcxx=libstdc++", info)
def install_profile_options_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
options=[("Hello0:language", 1),
("Hello0:static", False)])
self.client.save(files)
self.client.run("install --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("language=1", info)
self.assertIn("static=False", info)
def scopes_env_test(self):
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
scopes={"Hello0:myscope": "1",
"ALL:otherscope": "2",
"undefined": "3"}, # undefined scope do not apply to my packages
env=[("CXX", "/path/tomy/g++"), ("CC", "/path/tomy/gcc")])
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr scopes_env")
self.assertIn("Scope myscope: 1", self.client.user_io.out)
self.assertIn("Scope otherscope: 2", self.client.user_io.out)
self.assertIn("Scope undefined: None", self.client.user_io.out)
self._assert_env_variable_printed("CC", "/path/tomy/gcc")
self._assert_env_variable_printed("CXX", "/path/tomy/g++")
# The env variable shouldn't persist after install command
self.assertFalse(os.environ.get("CC", None) == "/path/tomy/gcc")
self.assertFalse(os.environ.get("CXX", None) == "/path/tomy/g++")
def test_package_test(self):
test_conanfile = '''from conans.model.conan_file import ConanFile
from conans import CMake
import os
class DefaultNameConan(ConanFile):
name = "DefaultName"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
requires = "Hello0/0.1@lasote/stable"
def build(self):
# Print environment vars
# self.run('cmake %s %s' % (self.conanfile_directory, cmake.command_line))
if self.settings.os == "Windows":
self.run('echo "My var is %ONE_VAR%"')
else:
self.run('echo "My var is $ONE_VAR"')
def test(self):
pass
'''
files = {"conanfile.py": conanfile_scope_env,
"test_package/conanfile.py": test_conanfile}
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
scopes={}, env=[("ONE_VAR", "ONE_VALUE")])
self.client.save(files)
self.client.run("test_package --profile scopes_env")
self._assert_env_variable_printed("ONE_VAR", "ONE_VALUE")
self.assertIn("My var is ONE_VALUE", str(self.client.user_io.out))
# Try now with package environment vars
create_profile(self.client.client_cache.profiles_path, "scopes_env2", settings={},
scopes={}, package_env={"DefaultName": [("ONE_VAR", "IN_TEST_PACKAGE")],
"Hello0": [("ONE_VAR", "PACKAGE VALUE")]})
self.client.run("test_package --profile scopes_env2")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
self.assertIn("My var is IN_TEST_PACKAGE", str(self.client.user_io.out))
# Try now overriding some variables with command line
self.client.run("test_package --profile scopes_env2 "
"-e DefaultName:ONE_VAR=InTestPackageOverride "
"-e Hello0:ONE_VAR=PackageValueOverride ")
self._assert_env_variable_printed("ONE_VAR", "PackageValueOverride")
self.assertIn("My var is InTestPackageOverride", str(self.client.user_io.out))
# A global setting in command line won't override a scoped package variable
self.client.run("test_package --profile scopes_env2 -e ONE_VAR=AnotherValue")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
def _assert_env_variable_printed(self, name, value):
self.assertIn("%s=%s" % (name, value), self.client.user_io.out)
def info_with_profiles_test(self):
self.client.run("remove '*' -f")
# Create a simple recipe to require
winreq_conanfile = '''
from conans.model.conan_file import ConanFile
class WinRequireDefaultNameConan(ConanFile):
name = "WinRequire"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
'''
files = {"conanfile.py": winreq_conanfile}
self.client.save(files)
self.client.run("export lasote/stable")
# Now require the first recipe depending on OS=windows
conanfile = '''from conans.model.conan_file import ConanFile
import os
class DefaultNameConan(ConanFile):
name = "Hello"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
def config(self):
if self.settings.os == "Windows":
self.requires.add("WinRequire/0.1@lasote/stable")
'''
files = {"conanfile.py": conanfile}
self.client.save(files)
self.client.run("export lasote/stable")
# Create a profile that doesn't activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Linux"},
scopes={})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertNotIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
# Create a profile that activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Windows"},
scopes={})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
| mit | 1,846,403,134,146,039,600 | 41.360614 | 97 | 0.603333 | false |
luofei98/qgis | python/plugins/processing/algs/otb/maintenance/OTBTester.py | 4 | 16871 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OTBTester.py
---------------------
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI)
Oscar Picas (CS SI)
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Julien Malik, Oscar Picas'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import unittest
import ConfigParser
import io
from parsing import (
File, Command, Comment, BlankLine, Arg, parse, prettify)
from string import Template
import os
import traceback
import logging
import copy
from ConfigParser import SafeConfigParser
from processing.otb.OTBHelper import get_OTB_log
class LowerTemplate(Template):
def safe_substitute(self, param):
ret = super(LowerTemplate, self).safe_substitute(param).lower()
return ret
class MakefileParser(object):
def __init__(self):
self.maxDiff = None
        self.parser = SafeConfigParser()
        # Check for the config file before parsing it; SafeConfigParser.read()
        # silently ignores missing files.
        if not os.path.exists('otbcfg.ini'):
            raise Exception("OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified in the file otbcfg.ini")
        self.parser.read('otbcfg.ini')
        self.root_dir = self.parser.get('otb', 'checkout_dir')
if not os.path.exists(self.root_dir):
raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
self.build_dir = self.parser.get('otb', 'build_dir')
if not os.path.exists(self.build_dir):
raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
self.logger = get_OTB_log()
def test_CMakelists(self):
provided = {}
provided["OTB_SOURCE_DIR"] = self.root_dir
provided["OTB_BINARY_DIR"] = self.build_dir
provided["OTB_DATA_LARGEINPUT_ROOT"] = os.path.normpath(os.path.join(self.root_dir, "../OTB-Data/Input"))
try:
with open(os.path.join(self.root_dir, "CMakeLists.txt")) as file_input:
content = file_input.read()
output = parse(content)
defined_paths = [each for each in output if 'Command' in str(type(each)) and "FIND_PATH" in each.name]
the_paths = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in defined_paths}
the_sets = [each for each in output if 'Command' in str(type(each)) and "SET" in each.name.upper()]
the_sets = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in the_sets}
the_sets = {key : " ".join(the_sets[key]) for key in the_sets}
the_strings = set([each.body[-1].contents for each in output if 'Command' in str(type(each)) and "STRING" in each.name.upper()] )
def mini_clean(item):
if item.startswith('"') and item.endswith('"') and " " not in item:
return item[1:-1]
return item
the_sets = {key : mini_clean(the_sets[key]) for key in the_sets}
def templatize(item):
if "$" in item:
return Template(item)
return item
for key in the_sets:
if key in the_strings:
the_sets[key] = the_sets[key].lower()
the_sets = {key : templatize(the_sets[key]) for key in the_sets}
for path in the_paths:
target_file = the_paths[path][1]
suggested_paths = []
if len(the_paths[path]) > 2:
suggested_paths = the_paths[path][2:]
try:
provided[path] = find_file(target_file)
                except Exception:
for each in suggested_paths:
st = Template(each)
pac = os.path.abspath(st.safe_substitute(provided))
if os.path.exists(pac):
provided[path] = pac
break
resolve_dict(provided, the_sets)
provided.update(the_sets)
return provided
        except Exception as e:
traceback.print_exc()
self.fail(e.message)
def add_make(self, previous_context, new_file):
input = open(new_file).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
setcommands = [each for each in apps if 'SET' in each.name.upper()]
stringcommands = [each for each in apps if 'STRING' in each.name.upper()]
environment = previous_context
def mini_clean(item):
if item.startswith('"') and item.endswith('"') and " " not in item:
return item[1:-1]
return item
new_env = {}
for command in setcommands:
key = command.body[0].contents
ct = " ".join([item.contents for item in command.body[1:]])
ct = mini_clean(ct)
if "$" in ct:
values = Template(ct)
else:
values = ct
new_env[key] = values
for stringcommand in stringcommands:
key = stringcommand.body[-1].contents
ct = stringcommand.body[-2].contents
ct = mini_clean(ct.lower())
if "$" in ct:
values = LowerTemplate(ct)
else:
values = ct
new_env[key] = values
resolve_dict(environment, new_env)
environment.update(new_env)
return environment
def get_apps(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
otb_apps = [each for each in apps if 'OTB_TEST_APPLICATION' in each.name.upper()]
return otb_apps
def get_tests(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
otb_tests = [each for each in apps if 'ADD_TEST' in each.name.upper()]
return otb_tests
def get_apps_with_context(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
def is_a_command(item):
return 'Command' in str(type(item))
appz = []
context = []
for each in output:
if is_a_command(each):
if 'FOREACH' in each.name and 'ENDFOREACH' not in each.name:
args = [item.contents for item in each.body]
context.append(args)
elif 'ENDFOREACH' in each.name:
context.pop()
elif 'OTB_TEST_APPLICATION' in each.name.upper():
appz.append((each, context[:]))
return appz
def get_name_line(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = itemz[0][0]
the_string = Template(result).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(the_string).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
                except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def get_command_line(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = []
result.extend(["otbcli_%s" % each for each in itemz[1]])
if len(result[0]) == 7:
raise Exception("App name is empty !")
result.extend(itemz[2])
result.append("-testenv")
result.extend(itemz[3])
the_string = Template(" ".join(result)).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(" ".join(result)).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
                except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def get_test(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = ["otbTestDriver"]
result.extend(itemz[4])
if len(result) == 1:
return ""
the_string = Template(" ".join(result)).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(" ".join(result)).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
                except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def test_algos(self):
tests = {}
algos_dir = os.path.join(self.root_dir, "Testing/Applications")
makefiles = find_files("CMakeLists.txt", algos_dir)
to_be_excluded = os.path.join(self.root_dir, "Testing/Applications/CMakeLists.txt")
if to_be_excluded in makefiles:
makefiles.remove(to_be_excluded)
resolve_algos = {}
for makefile in makefiles:
intermediate_makefiles = []
path = makefile.split(os.sep)[len(self.root_dir.split(os.sep)):-1]
for ind in range(len(path)):
tmp_path = path[:ind+1]
tmp_path.append("CMakeLists.txt")
tmp_path = os.sep.join(tmp_path)
candidate_makefile = os.path.join(self.root_dir, tmp_path)
if os.path.exists(candidate_makefile):
intermediate_makefiles.append(candidate_makefile)
resolve_algos[makefile] = intermediate_makefiles
dict_for_algo = {}
for makefile in makefiles:
basic = self.test_CMakelists()
last_context = self.add_make(basic, os.path.join(self.root_dir, "Testing/Utilities/CMakeLists.txt"))
for intermediate_makefile in resolve_algos[makefile]:
last_context = self.add_make(last_context, intermediate_makefile)
dict_for_algo[makefile] = last_context
for makefile in makefiles:
appz = self.get_apps_with_context(makefile, dict_for_algo[makefile])
for app, context in appz:
if len(context) == 0:
import copy
ddi = copy.deepcopy(dict_for_algo[makefile])
tk_dict = autoresolve(ddi)
tk_dict = autoresolve(tk_dict)
name_line = self.get_name_line(app.body, tk_dict)
command_line = self.get_command_line(app.body, tk_dict)
test_line = self.get_test(app.body, tk_dict)
if '$' in test_line or '$' in command_line:
if '$' in command_line:
self.logger.error(command_line)
if '$' in test_line:
self.logger.warning(test_line)
else:
tests[name_line] = (command_line, test_line)
else:
contexts = {}
for iteration in context:
key = iteration[0]
values = [each[1:-1].lower() for each in iteration[1:]]
contexts[key] = values
keyorder = contexts.keys()
import itertools
pool = [each for each in itertools.product(*contexts.values())]
import copy
for poolinstance in pool:
neo_dict = copy.deepcopy(dict_for_algo[makefile])
zipped = zip(keyorder, poolinstance)
for each in zipped:
neo_dict[each[0]] = each[1]
ak_dict = autoresolve(neo_dict)
ak_dict = autoresolve(ak_dict)
ak_dict = autoresolve(ak_dict)
ddi = ak_dict
name_line = self.get_name_line(app.body, ddi)
command_line = self.get_command_line(app.body, ddi)
test_line = self.get_test(app.body, ddi)
                        if '$' in command_line or '$' in test_line:  # store only fully-resolved tests, mirroring the branch above
if '$' in command_line:
self.logger.error(command_line)
if '$' in test_line:
self.logger.warning(test_line)
else:
tests[name_line] = (command_line, test_line)
return tests
def autoresolve(a_dict):
def as_template(item, b_dict):
if hasattr(item, 'safe_substitute'):
return item.safe_substitute(b_dict)
ate = Template(item)
return ate.safe_substitute(b_dict)
templatized = {key: as_template(a_dict[key], a_dict) for key in a_dict.keys() }
return templatized
def find_file(file_name, base_dir = os.curdir):
import os
for root, dirs, files in os.walk(base_dir, topdown=False):
for name in files:
if name == file_name:
return os.path.join(root, name)
raise Exception("File not found %s" % file_name)
def find_files(file_name, base_dir = os.curdir):
import os
result = []
for root, dirs, files in os.walk(base_dir, topdown=False):
for name in files:
if name == file_name:
result.append(os.path.join(root, name))
return result
def resolve_dict(adia, adib):
init = len(adia)
fin = len(adia) + 1
def _resolve_dict(dia, dib):
for key in dib:
cand_value = dib[key]
if hasattr(cand_value, 'safe_substitute'):
value = cand_value.safe_substitute(dia)
if type(value) == type(".") and "$" not in value:
dia[key] = value
else:
dia[key] = cand_value
for key in dia:
if key in dib:
del dib[key]
    while init != fin:
init = len(adia)
_resolve_dict(adia, adib)
fin = len(adia)
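# Illustrative sketch (not part of the original module): resolve_dict
# repeatedly substitutes Template values from the second dict into the first
# until no new keys can be resolved, e.g.
#   dia = {'OTB_SOURCE_DIR': '/src/otb'}
#   dib = {'OTB_APPS': Template('$OTB_SOURCE_DIR/Applications')}
#   resolve_dict(dia, dib)
#   # dia == {'OTB_SOURCE_DIR': '/src/otb', 'OTB_APPS': '/src/otb/Applications'}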
| gpl-2.0 | 789,516,047,254,340,100 | 38.053241 | 145 | 0.514433 | false |
aewallin/opencamlib | examples/python/old/kdtree_tst.py | 1 | 5359 | import ocl as cam
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
myscreen.setAmbient(1,1,1)
#stl = camvtk.STLSurf(filename="demo.stl")
stl = camvtk.STLSurf(filename="demo2.stl")
print("STL surface read")
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.8))
#stl.SetFlat()
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STLSurf with ", s.size(), " triangles")
cutterDiameter=20
cutter = cam.CylCutter(cutterDiameter, 5)
#print cutter.str()
#print cc.type
minx=20
dx=15
maxx=130
miny=-70
dy=1
maxy=50
z=-10
bucketSize = 1
#pftp = cam.ParallelFinish()
#pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
#pftp.initSTLSurf(s, bucketSize)
#pftp.dropCutterSTL1(cutter)
#print " made ", pftp.dcCalls, " drop-cutter calls"
#exit
pf2 = cam.ParallelFinish()
pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pf2.initSTLSurf(s, bucketSize)
pf2.dropCutterSTL2(cutter)
print(" made ", pf2.dcCalls, " drop-cutter calls")
#clpoints = pftp.getCLPoints()
#ccpoints = pftp.getCCPoints()
clpoints = pf2.getCLPoints()
ccpoints = pf2.getCCPoints()
#CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
nv=0
nn=0
ne=0
nf=0
myscreen.camera.SetPosition(3, 300, 200)
myscreen.camera.SetFocalPoint(75, 0, 0)
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
t3 = camvtk.Text()
t3.SetPos( (30, 30))
myscreen.addActor( t3)
t4 = camvtk.Text()
t4.SetPos( (30, myscreen.height-60))
myscreen.addActor( t4)
n=0
precl = cam.Point()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
lwr.SetFileName("tux1.png")
for cl,cc in zip(clpoints,ccpoints):
camEye = myscreen.camera.GetFocalPoint()
camPos = myscreen.camera.GetPosition()
postext = "(%3.3f, %3.3f, %3.3f)" % (camPos[0], camPos[1], camPos[2])
eyetext = "(%3.3f, %3.3f, %3.3f)" % (camEye[0], camEye[1], camEye[2])
camtext = "Camera LookAt: "+eyetext+"\nCamera Pos: "+ postext
t4.SetText(camtext)
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
xtext = "%3.3f" % cl.x
ytext = "%3.3f" % cl.y
ztext = "%3.3f" % cl.z
t2.SetText( "X: " + xtext + "\nY: " + ytext + "\nZ: " + ztext )
if cc.type==cam.CCType.FACET:
nf+=1
col = (0,1,1)
elif cc.type == cam.CCType.VERTEX:
nv+=1
col = (0,1,0)
elif cc.type == cam.CCType.EDGE:
ne+=1
col = (1,0,0)
elif cc.type == cam.CCType.NONE:
#print "type=NONE!"
nn+=1
col = (1,1,1)
#if cl.isInside(t):
# col = (0, 1, 0)
#else:
# col = (1, 0, 0)
trilist = pf2.getTrianglesUnderCutter(cl, cutter)
#print "at cl=", cl.str() , " where len(trilist)=", len(trilist)
t3.SetText("Total Triangles: "+ str(s.size()) +"\nUnder Cutter (red): "+str(len(trilist)))
stl2 = camvtk.STLSurf(filename=None, triangleList=trilist, color=(1,0,0)) # a new surface with only triangles under cutter
stl2.SetWireframe()
#stl2.SetFlat()
myscreen.addActor(stl2)
trilist=[]
cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z), radius=cutterDiameter/2, height=20, color=(0.7,0,1))
myscreen.addActor( cutactor )
#myscreen.addActor( camvtk.Point(center=(cl.x,cl.y,cl.z) , color=col) )
if n==0:
precl = cl
else:
d = cl-precl
if (d.norm() < 90):
myscreen.addActor( camvtk.Line( p1=(precl.x, precl.y, precl.z), p2=(cl.x, cl.y, cl.z), color=(0,1,1) ) )
precl = cl
n=n+1
#myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
#myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
#print cc.type
myscreen.camera.Azimuth( 1 )
#time.sleep(0.01)
myscreen.render()
w2if.Modified()
lwr.SetFileName("frames/kdbetter"+ ('%05d' % n)+".png")
#lwr.Write()
#raw_input("Press Enter to continue")
myscreen.removeActor(stl2)
myscreen.removeActor( cutactor )
print("none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf)
print(len(clpoints), " cl points evaluated")
#lwr.Write()
for n in range(1,36):
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.camera.Azimuth( 1 )
time.sleep(0.01)
myscreen.render()
lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
w2if.Modified()
#lwr.Write()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
| lgpl-2.1 | 727,688,251,211,723,800 | 28.284153 | 130 | 0.549543 | false |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/django/forms/fields.py | 35 | 47161 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import uuid
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.deprecation import (
RemovedInDjango110Warning, RenameMethodsBase,
)
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class RenameFieldMethods(RenameMethodsBase):
renamed_methods = (
('_has_changed', 'has_changed', RemovedInDjango110Warning),
)
class Field(six.with_metaclass(RenameFieldMethods, object)):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False, disabled=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
if self.disabled:
return initial
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
return self._coerce(data) != self._coerce(initial)
except ValidationError:
return True
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it with ''.
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
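# Illustrative sketch (not part of Django): Field.clean() chains
# to_python() -> validate() -> run_validators(), so subclasses usually
# override only the piece they need, e.g.
#   f = IntegerField(min_value=0)
#   f.clean('3')   # -> 3
#   f.clean('-1')  # raises ValidationError via MinValueValidator
#   f.clean('')    # raises ValidationError('This field is required.')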
class CharField(Field):
def __init__(self, max_length=None, min_length=None, strip=True, *args, **kwargs):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
value = force_text(value)
if self.strip:
value = value.strip()
return value
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
re_decimal = re.compile(r'\.0*\s*$')
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
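# Illustrative example (not part of Django): with decimal_places=7 the step
# above is rendered as '1e-7' rather than '0.0000001', which browsers parse
# reliably as a number input step.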
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
value = parse_duration(value)
if value is None:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
kwargs.setdefault('strip', False)
# error_message is just kept for backwards compatibility:
if error_message is not None:
warnings.warn(
"The 'error_message' argument is deprecated. Use "
"Field.error_messages['invalid'] instead.",
RemovedInDjango110Warning, stacklevel=2
)
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_value; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` exception for certain).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
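# Illustrative example (not part of Django): a missing scheme is assumed to
# be http, so
#   URLField().clean('example.com/path')  # -> 'http://example.com/path'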
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
        the BooleanField, we need to explicitly check for True, because we are
not using the bool() function
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
def has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
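# Illustrative example (not part of Django):
#   NullBooleanField().clean('true')   # -> True
#   NullBooleanField().clean('0')      # -> False
#   NullBooleanField().clean('maybe')  # -> None (invalid values clean to None)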
class CallableChoiceIterator(object):
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
for e in self.choices_func():
yield e
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
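# Callable choices are evaluated lazily through CallableChoiceIterator, so
# the option list can change between requests.  Sketch (fetch_current_options
# is a hypothetical helper):
#
#     f = ChoiceField(choices=lambda: fetch_current_options())
#     f.clean('a')  # re-runs fetch_current_options() during validation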
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
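# Coercion sketch (hypothetical values):
#
#     f = TypedChoiceField(choices=[('1', 'one'), ('2', 'two')], coerce=int)
#     f.clean('1')  # -> 1: validated against choices first, then coerced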
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
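# Illustrative subclass sketch (not part of Django); compress() is the only
# method a subclass has to provide:
#
#     class LatLonField(MultiValueField):
#         def __init__(self, *args, **kwargs):
#             fields = (FloatField(), FloatField())
#             super(LatLonField, self).__init__(fields, *args, **kwargs)
#
#         def compress(self, data_list):
#             return tuple(data_list) if data_list else None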
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
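# Usage sketch (hypothetical path): FilePathField(path='/usr/share/doc',
# match=r'\.txt$', recursive=True) walks the tree once at definition time and
# exposes every matching file as a (full_path, relative_label) choice.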
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
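# Sketch: with protocol='both' and unpack_ipv4=True, an IPv4-mapped address
# such as '::ffff:192.0.2.1' is returned unpacked as '192.0.2.1' by
# clean_ipv6_address above.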
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *args, **kwargs):
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
class UUIDField(CharField):
default_error_messages = {
'invalid': _('Enter a valid UUID.'),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def to_python(self, value):
value = super(UUIDField, self).to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
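# Sketch (hypothetical value): UUIDField().clean('12345678' * 4) returns a
# uuid.UUID instance, and prepare_value() renders it back as 32-char hex.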
| mit | 4,796,067,627,579,341,000 | 37.187045 | 110 | 0.598121 | false |
hmendozap/master-arbeit-projects | autosk_dev_test/component/LinReg.py | 1 | 8756 | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
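# Rough usage sketch (hypothetical data; sample_configuration() and
# get_dictionary() are assumed ConfigSpace helpers, not verified here):
#
#     cs = LinReg.get_hyperparameter_search_space()
#     config = cs.sample_configuration()
#     reg = LinReg(**config.get_dictionary())
#     reg.fit(X_train, y_train)
#     y_pred = reg.predict(X_test)  # predictions are de-normalized internally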
| mit | -3,494,951,040,575,415,000 | 43.446701 | 102 | 0.470991 | false |
idnael/ctxsearch | ctxsearch/_termi/_termi_encoding.py | 1 | 4460 | #!/usr/bin/python
# TerminatorEncoding - charset encoding classes
# Copyright (C) 2006-2008 [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""TerminatorEncoding by Emmanuel Bretelle <[email protected]>
TerminatorEncoding supplies a list of possible encoding
values.
This list is taken from gnome-terminal's src/encoding.h
and src/encoding.c
"""
from terminatorlib import translation
class TerminatorEncoding:
"""Class to store encoding details"""
encodings = [
[True, None, _("Current Locale")],
[False, "ISO-8859-1", _("Western")],
[False, "ISO-8859-2", _("Central European")],
[False, "ISO-8859-3", _("South European") ],
[False, "ISO-8859-4", _("Baltic") ],
[False,"ISO-8859-5", _("Cyrillic") ],
[False, "ISO-8859-6", _("Arabic") ],
[False, "ISO-8859-7", _("Greek") ],
[False, "ISO-8859-8", _("Hebrew Visual") ],
[False, "ISO-8859-8-I", _("Hebrew") ],
[False, "ISO-8859-9", _("Turkish") ],
[False, "ISO-8859-10", _("Nordic") ],
[False, "ISO-8859-13", _("Baltic") ],
[False, "ISO-8859-14", _("Celtic") ],
[False, "ISO-8859-15", _("Western") ],
[False, "ISO-8859-16", _("Romanian") ],
[False, "UTF-7", _("Unicode") ],
[False, "UTF-8", _("Unicode") ],
[False, "UTF-16", _("Unicode") ],
[False, "UCS-2", _("Unicode") ],
[False, "UCS-4", _("Unicode") ],
[False, "ARMSCII-8", _("Armenian") ],
[False, "BIG5", _("Chinese Traditional") ],
[False, "BIG5-HKSCS", _("Chinese Traditional") ],
[False, "CP866", _("Cyrillic/Russian") ],
[False, "EUC-JP", _("Japanese") ],
[False, "EUC-KR", _("Korean") ],
[False, "EUC-TW", _("Chinese Traditional") ],
[False, "GB18030", _("Chinese Simplified") ],
[False, "GB2312", _("Chinese Simplified") ],
[False, "GBK", _("Chinese Simplified") ],
[False, "GEORGIAN-PS", _("Georgian") ],
[False, "HZ", _("Chinese Simplified") ],
[False, "IBM850", _("Western") ],
[False, "IBM852", _("Central European") ],
[False, "IBM855", _("Cyrillic") ],
[False, "IBM857", _("Turkish") ],
[False, "IBM862", _("Hebrew") ],
[False, "IBM864", _("Arabic") ],
[False, "ISO2022JP", _("Japanese") ],
[False, "ISO2022KR", _("Korean") ],
[False, "ISO-IR-111", _("Cyrillic") ],
[False, "JOHAB", _("Korean") ],
[False, "KOI8-R", _("Cyrillic") ],
[False, "KOI8-U", _("Cyrillic/Ukrainian") ],
[False, "MAC_ARABIC", _("Arabic") ],
[False, "MAC_CE", _("Central European") ],
[False, "MAC_CROATIAN", _("Croatian") ],
[False, "MAC-CYRILLIC", _("Cyrillic") ],
[False, "MAC_DEVANAGARI", _("Hindi") ],
[False, "MAC_FARSI", _("Persian") ],
[False, "MAC_GREEK", _("Greek") ],
[False, "MAC_GUJARATI", _("Gujarati") ],
[False, "MAC_GURMUKHI", _("Gurmukhi") ],
[False, "MAC_HEBREW", _("Hebrew") ],
[False, "MAC_ICELANDIC", _("Icelandic") ],
[False, "MAC_ROMAN", _("Western") ],
[False, "MAC_ROMANIAN", _("Romanian") ],
[False, "MAC_TURKISH", _("Turkish") ],
[False, "MAC_UKRAINIAN", _("Cyrillic/Ukrainian") ],
[False, "SHIFT-JIS", _("Japanese") ],
[False, "TCVN", _("Vietnamese") ],
[False, "TIS-620", _("Thai") ],
[False, "UHC", _("Korean") ],
[False, "VISCII", _("Vietnamese") ],
[False, "WINDOWS-1250", _("Central European") ],
[False, "WINDOWS-1251", _("Cyrillic") ],
[False, "WINDOWS-1252", _("Western") ],
[False, "WINDOWS-1253", _("Greek") ],
[False, "WINDOWS-1254", _("Turkish") ],
[False, "WINDOWS-1255", _("Hebrew") ],
[False, "WINDOWS-1256", _("Arabic") ],
[False, "WINDOWS-1257", _("Baltic") ],
[False, "WINDOWS-1258", _("Vietnamese") ]
]
def __init__(self):
pass
def get_list():
"""Return a list of supported encodings"""
return TerminatorEncoding.encodings
get_list = staticmethod(get_list)
| gpl-2.0 | 6,860,210,027,241,304,000 | 37.782609 | 79 | 0.576457 | false |
kbrose/project_euler | p90-99/p91.py | 1 | 1360 | grid_size = 50 # assumes a square grid
# The loop below only finds triangles whose right angle sits at a lattice
# point P = (x, y) with x, y >= 1.  The right angle can also sit at the
# origin, or at a vertex on an axis with one leg along that axis; each of
# those three families contributes grid_size**2 triangles, added up front.
counter = 3 * (grid_size * grid_size)
def gcd(a,b):
while (b != 0) and (a != b) and (a != 0):
if b < a:
a = a - b
else:
b = b - a
if a > 0:
return a
return b
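# Counting idea (sketch): with the right angle at P = (x, y), the third
# vertex lies on the line through P perpendicular to OP.  Its smallest
# lattice step is (y, -x) / gcd(x, y); the loop walks that step in both
# directions from P until the grid boundary is hit.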
end = grid_size+1
def my_append(arr, item):
if item in arr:
return 0
arr.append(item)
return 1
triangles = []
for x in xrange(1,end):
for y in xrange(1,end):
GCD = gcd(x,y)
if GCD > 0:
slope = [x / GCD, y / GCD]
inv_slope = [y / GCD, -(x / GCD)]
else:
slope = [x,y]
inv_slope = [y,-x]
out_of_bounds_left = 0
out_of_bounds_right = 0
for mult in xrange(1,51):
x_inc = mult*inv_slope[0]
y_inc = mult*inv_slope[1]
if ((x - x_inc < 0) or (y - y_inc > grid_size)):
out_of_bounds_left = 1
else:
counter += my_append(triangles, [[0,0],[x,y],[x-x_inc,y-y_inc]])
if ((x + x_inc > grid_size) or (y + y_inc < 0)):
out_of_bounds_right = 1
else:
counter += my_append(triangles, [[0,0],[x,y],[x+x_inc,y+y_inc]])
if (out_of_bounds_left and out_of_bounds_right):
break
print counter
| unlicense | 557,626,984,125,605,100 | 26.2 | 80 | 0.447059 | false |
LIMXTEC/BitSend | qa/rpc-tests/mempool_reorg.py | 3 | 4514 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitsendTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, nodes[0]'s blocks
        # 101, 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
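        # Hex-surgery notes (sketch): the first replace above rewrites the
        # input's nSequence away from 0xffffffff so that nLockTime is
        # enforced, and the second rewrites the final four bytes -- the
        # little-endian nLockTime field -- to the current height + 2, so the
        # transaction is rejected until the chain grows.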
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit | 7,167,700,610,357,613,000 | 43.693069 | 122 | 0.652636 | false |
piru/letsencrypt | letsencrypt/tests/display/enhancements_test.py | 53 | 1703 | """Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt import errors
from letsencrypt.display import util as display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(errors.Error, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 | 8,566,637,641,327,471,000 | 28.877193 | 74 | 0.67293 | false |
canwe/NewsBlur | apps/rss_feeds/migrations/0046_remove_feedhistory.py | 18 | 5828 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'FeedUpdateHistory'
db.delete_table('rss_feeds_feedupdatehistory')
def backwards(self, orm):
# Adding model 'FeedUpdateHistory'
db.create_table('rss_feeds_feedupdatehistory', (
('number_of_feeds', self.gf('django.db.models.fields.IntegerField')()),
('average_per_feed', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=1)),
('fetch_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('seconds_taken', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('rss_feeds', ['FeedUpdateHistory'])
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit | 6,685,264,770,255,732,000 | 69.216867 | 151 | 0.562972 | false |
sumeetsk/NEXT-1 | next/assistant/pijemont/verifier.py | 3 | 7768 | import yaml, json
import random
import traceback
import sys
import os
import next.utils as utils
DICT = {'dict','dictionary','map'}
LIST = {'list'}
TUPLE = {'tuple'}
ONEOF = {'oneof'}
NUM = {'num','number','float'}
STRING = {'str','string','multiline'}
ANY = {'any','stuff'}
FILE = {'file'}
BOOL = {'boolean','bool'}
def load_doc(filename,base_path):
errs = []
with open(filename) as f:
ref = yaml.load(f.read())
ds = []
for ext in ref.pop('extends',[]):
r,e = load_doc(base_path+ext,base_path)
ds += [r]
errs += e
for d in ds:
ref = merge_dict(ref, d)
    errs += check_format(ref,'args' in ref[list(ref.keys())[0]])
return ref,errs
def merge_dict(d1,d2,prefer=1):
for k in d2:
if k in d1:
if type(d1[k]) == dict:
d1[k] = merge_dict(d1[k],d2[k])
if prefer == 2:
d1[k] = d2[k]
else:
d1[k] = d2[k]
return d1
def check_format(doc,rets=True):
errs = []
if rets:
for x in doc:
if 'args' in doc[x]:
errs += check_format_helper({'type':'dict','values':doc[x]['args']},'args/'+x)
if 'rets' in doc[x]:
errs += check_format_helper({'type':'dict','values':doc[x]['rets']},'rets/'+x)
else:
for x in doc:
errs += check_format_helper(doc[x],x)
return errs
def check_format_helper(doc,name):
errs = []
if not 'type' in doc:
errs += ['{}: "type" key missing'.format(name)]
diff = set(doc.keys()) - {'type','description','values','optional','default'}
if len(diff) > 0:
errs += ["{}: extra keys in spec: {}".format(name,", ".join(list(diff)))]
if not doc['type'] in DICT | LIST | TUPLE | ONEOF | NUM | STRING | BOOL | ANY | FILE:
errs += ['{}: invlid type: {}'.format(name, doc['type'])]
if doc['type'] in DICT | LIST | TUPLE | ONEOF and not 'values' in doc:
errs += ['{}: requires "values" key'.format(name)]
if len(errs) > 0:
return errs
if doc['type'] in DICT:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,x))
elif doc['type'] in LIST:
errs += check_format_helper(doc['values'],'{}/values'.format(name))
elif doc['type'] in TUPLE:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,str(x)))
elif doc['type'] in ONEOF:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,str(x)))
return errs
def verify(input_dict, reference_dict):
"""
    Returns: modified_input
    where modified_input is the input populated with default values where
    applicable.  Raises an Exception listing the errors (as produced by
    verify_helper) if verification fails.
"""
input_dict, messages = verify_helper("", input_dict, {'type':'dict','values':reference_dict})
try:
if len(messages)>0:
raise Exception("Failed to verify: {}".format(messages))
else:
return input_dict
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print("Exception: {} {}".format(exc_value, traceback.format_exc()))
        traceback.print_tb(exc_traceback)
        raise Exception(exc_value)
def verify_helper(name, input_element, reference_dict):
"""
Returns: modified_input,list_of_errors
where:
- modified_input is the input populated with default values
- list_of_errors is: [{name: name, message: ...}, ...]
"""
ans = []
if reference_dict['type'] in DICT:
if not isinstance(input_element, (dict)):
ans += [{"name":name, "message":"invalid dict"}]
else:
l1,l2 = compare_dict_keys(input_element, reference_dict['values'])
if len(l1) > 0:
ans += [{"name":name, "message":"extra keys in input: " + ",".join(l1)}]
else:
ok = True
for k in l2:
if 'default' in reference_dict['values'][k]:
input_element[k] = reference_dict['values'][k]['default']
if reference_dict['values'][k]['type'] in NUM:
input_element[k] = float(input_element[k])
elif (not 'optional' in reference_dict['values'][k]) or reference_dict['values'][k]['optional'] == False:
ans += [{"name":name+'/'+k, "message":"required key is absent"}]
ok = False
if(ok):
for k in input_element:
input_element[k], temp_ans = verify_helper(name + '/' + k, input_element[k], reference_dict['values'][str(k)])
ans += temp_ans
elif reference_dict['type'] in LIST:
if not isinstance(input_element, (list)):
ans += [{"name":name, "message":"invalid list"}]
else:
for i in range(len(input_element)):
input_element[i],temp_ans = verify_helper(name+'/'+str(i), input_element[i], reference_dict['values'])
ans += temp_ans
elif reference_dict['type'] in TUPLE:
if not isinstance(input_element, (list,tuple)):
ans += [{"name":name, "message":"invalid tuple"}]
else:
new_tuple = list(input_element)
for i in range(len(input_element)):
new_tuple[i], temp_ans = verify_helper(name+'/'+str(i), input_element[i], reference_dict['values'][i])
ans += temp_ans
            input_element = tuple(new_tuple)
elif reference_dict['type'] in BOOL:
if not isinstance(input_element, (bool)):
ans += [{"name":name, "message":"invalid boolean"}]
elif reference_dict['type'] in NUM:
if not isinstance(input_element, (int, long, float)):
ans += [{"name":name, "message":"invalid number"}]
elif reference_dict['type'] in STRING:
if not isinstance(input_element, (str, unicode)):
ans += [{"name":name, "message":"expected a string, got {}".format(type(input_element))}]
elif 'values' in reference_dict and not input_element in reference_dict['values']:
ans += [{"name":name, "message":"argument must be one of the specified strings: "+", ".join(reference_dict['values'])}]
elif reference_dict['type'] in ONEOF:
count = 0
for k in reference_dict['values']:
if k in input_element:
count += 1
if count > 1:
ans += [{"name":name+"/"+k,"message":"More than one argument specified for 'oneof arg: " + name}]
if count == 0:
if 'default' in reference_dict:
input_element = reference_dict['default']
else:
ans += [{"name":name, "message":"no argument provided for 'oneof' arg"}]
elif reference_dict['type'] in ANY | FILE:
pass
else:
ans += [{"name":name, "message":"invalid type: {}".format(reference_dict['type'])}]
return input_element,ans
def compare_dict_keys(d1, d2):
"""
Returns [things in d1 not in d2, things in d2 not in d1]
"""
return [k for k in d1 if not k in d2], [k for k in d2 if not k in d1]
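# Usage sketch (hypothetical spec mirroring the YAML format checked above):
#
#     ref = {'n': {'type': 'num', 'default': 5},
#            'name': {'type': 'str', 'optional': True}}
#     verify({'name': 'demo'}, ref)  # -> {'name': 'demo', 'n': 5.0}
#
# Missing keys with defaults are filled in (default numbers are cast to
# float); missing required keys raise an Exception listing every problem.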
if __name__ == '__main__':
    if len(sys.argv) > 1:
        r, e = load_doc(sys.argv[1], '')
        print('doc', r)
        print('errs', e)
        if len(sys.argv) > 2:
            i = verify(json.loads(sys.argv[2]), r)
            print("Verified input", i)
| apache-2.0 | -2,925,835,634,634,915,000 | 34.47032 | 134 | 0.529609 | false |