repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses 15 values) | hash (int64 -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64 3.17-100) | line_max (int64 7-1k) | alpha_frac (float64 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
sagiss/sardana | src/sardana/spock/magic.py | 1 | 9633 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
##
## This file is part of Sardana
##
## http://www.sardana-controls.org/
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Sardana is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Sardana is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""Initial magic commands and hooks for the spock IPython environment"""
__all__ = ['expconf', 'showscan', 'spsplot', 'debug_completer',
'debug', 'www',
'post_mortem', 'macrodata', 'edmac', 'spock_late_startup_hook',
'spock_pre_prompt_hook']
from .genutils import page, get_door, get_macro_server, ask_yes_no, arg_split
from .genutils import MSG_DONE, MSG_FAILED
from .genutils import get_ipapi
def expconf(self, parameter_s=''):
"""Launches a GUI for configuring the environment variables
for the experiments (scans)"""
try:
from sardana.taurus.qt.qtgui.extra_sardana import ExpDescriptionEditor
except:
print "Error importing ExpDescriptionEditor " \
"(hint: is taurus extra_sardana installed?)"
return
try:
doorname = get_door().name()
except TypeError:
# TODO: For Taurus 4 adaptation
doorname = get_door().fullname
#===========================================================================
## ugly hack to avoid ipython/qt thread problems #e.g. see
## https://sourceforge.net/p/sardana/tickets/10/
## this hack does not allow inter-process communication and leaves the
## widget open after closing spock
## @todo: investigate cause of segfaults when launching qt widgets from ipython
#
# w = ExpDescriptionEditor(door=doorname)
# w.show() #launching it like this, produces the problem of https://sourceforge.net/p/sardana/tickets/10/
import subprocess
import sys
fname = sys.modules[ExpDescriptionEditor.__module__].__file__
args = ['python', fname, doorname]
subprocess.Popen(args)
# ===========================================================================
def showscan(self, parameter_s=''):
"""Shows a scan in a GUI.
:param scan_id: scan number [default: None, meaning show last scan]"""
params = parameter_s.split()
door = get_door()
online, scan_nb = False, None
if len(params) > 0:
if params[0].lower() == 'online':
msg = 'To see the scans online, launch "expconf" and ' + \
'enable the plots from the "plots" button ' + \
'(top-right in the first tab)'
print msg
return
# show the scan plot, ignoring the plot configuration
elif params[0].lower() == 'online_raw':
online = True
else:
scan_nb = int(params[0])
door.show_scan(scan_nb, online=online)
def spsplot(self, parameter_s=''):
get_door().plot()
def debug_completer(self, event):
# calculate parameter index
param_idx = len(event.line.split()) - 1
if not event.line.endswith(' '):
param_idx -= 1
if param_idx == 0:
return ('off', 'on')
def debug(self, parameter_s=''):
"""Activate/Deactivate macro server debug output"""
params = parameter_s.split()
door = get_door()
if len(params) == 0:
s = door.getDebugMode() and 'on' or 'off'
print "debug mode is %s" % s
return
elif len(params) == 1:
s = params[0].lower()
if not s in ('off', 'on'):
print "Usage: debug [on|off]"
return
door.setDebugMode(s == 'on')
print "debug mode is now %s" % s
else:
print "Usage: debug [on|off]"
def www(self, parameter_s=''):
"""What went wrong. Prints the error message from the last macro execution"""
import PyTango
door = get_door()
try:
last_macro = door.getLastRunningMacro()
if last_macro is None:
door.writeln("No macro ran from this console yet!")
return
if not hasattr(last_macro, 'exc_stack') or last_macro.exc_stack is None:
door.writeln("Sorry, but no exception occurred running last " \
"macro (%s)." % last_macro.name)
return
exc = "".join(last_macro.exc_stack)
door.write(exc)
except Exception, e:
door.writeln("Unexpected exception occurred executing www:",
stream=door.Error)
door.writeln(str(e), stream=door.Error)
import traceback
traceback.print_exc()
def post_mortem(self, parameter_s='', from_www=False):
"""Post mortem analysis. Prints the local stream buffer. If no stream is
specified, it reads 'debug' stream. Valid values are output, critical,
error, warning, info, debug, result"""
params = parameter_s.split() or ['debug']
door = get_door()
logger = door.getLogObj(params[0])
msg = ""
if not from_www:
try:
msg = "\n".join(logger.read(cache=False).value)
except:
from_www = True
if from_www:
msg = "------------------------------\n" \
"Server is offline.\n" \
"This is a post mortem analysis\n" \
"------------------------------\n"
msg += "\n".join(logger.getLogBuffer())
page(msg)
def macrodata(self, parameter_s=''):
"""macrodata
Returns the data produced by the last macro"""
door = get_door()
macro_data = door.read_attribute("RecordData")
from taurus.core.util.codecs import CodecFactory
factory = CodecFactory()
data = factory.decode(macro_data.value)
return data
def edmac(self, parameter_s=''):
"""edmac <macro name> [<module>]
Returns the contents of the macro file which contains the macro code for
the given macro name. If the module is given and it does not exist a new
one is created. If the given module is a simple module name and it does
not exist, it will be created on the first directory mentioned in the
MacroPath"""
import os
import tempfile
import PyTango
ms = get_macro_server()
pars = arg_split(parameter_s, posix=True)
if len(pars) == 1:
macro_name = pars[0]
is_new_macro = False
else:
is_new_macro = True
macro_name, macro_lib = pars
macro_info_obj = ms.getMacroInfoObj(macro_name)
if not is_new_macro:
if macro_info_obj is None:
print "Macro '%s' could not be found" % macro_name
return
macro_lib = macro_info_obj.module
if is_new_macro:
if macro_info_obj is not None:
msg = ('Do you want to create macro "%s" in module "%s" that will'
' override the already existing macro in module "%s"'
% (macro_name, macro_lib, macro_info_obj.module))
if not ask_yes_no(msg, 'y'):
print "Aborting edition..."
return
macro_info = (macro_lib, macro_name)
print 'Opening %s.%s...' % macro_info
try:
remote_fname, code, line_nb = ms.GetMacroCode(macro_info)
except PyTango.DevFailed, e:
PyTango.Except.print_exception(e)
return
fd, local_fname = tempfile.mkstemp(prefix='spock_%s_' % pars[0],
suffix='.py', text=True)
os.write(fd, code)
os.close(fd)
cmd = 'edit -x -n %s %s' % (line_nb, local_fname)
ip = get_ipapi()
ip.magic(cmd)
if ask_yes_no('Do you want to apply the new code on the server?', 'y'):
print 'Storing...',
try:
f = file(local_fname)
try:
new_code = f.read()
ms.SetMacroCode([remote_fname, new_code])
print MSG_DONE
except Exception, e:
print MSG_FAILED
print 'Reason:', str(e)
f.close()
except:
print 'Could not open file \'%s\' for safe transfer to the ' \
'server' % local_fname
print 'Did you forget to save?'
else:
print "Discarding changes..."
# if os.path.exists(local_fname):
# if ask_yes_no('Delete temporary file \'%s\'?' % local_fname, 'y'):
# os.remove(local_fname)
# bkp = '%s~' % local_fname
# if os.path.exists(bkp):
# os.remove(bkp)
try:
os.remove(local_fname)
except:
pass
def spock_late_startup_hook(self):
try:
get_door().setConsoleReady(True)
except:
import traceback
print "Exception in spock_late_startup_hook:"
traceback.print_exc()
def spock_pre_prompt_hook(self):
try:
get_door().pre_prompt_hook(self)
except:
import traceback
print "Exception in spock_pre_prompt_hook:"
traceback.print_exc()
# def spock_pre_runcode_hook(self):
# print "spock_pre_runcode_hook"
# return None
| lgpl-3.0 | 3,876,645,581,433,441,000 | 31.543919 | 109 | 0.570643 | false |
wjo1212/aliyun-log-python-sdk | aliyun/log/es_migration/migration_manager.py | 1 | 8762 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import logging
import time
from multiprocessing import Pool
from aliyun.log import LogClient
from aliyun.log.es_migration.collection_task import (CollectionTaskStatus,
run_collection_task)
from aliyun.log.es_migration.collection_task_config import CollectionTaskConfig
from aliyun.log.es_migration.index_logstore_mappings import \
IndexLogstoreMappings
from aliyun.log.es_migration.mapping_index_converter import \
MappingIndexConverter
from aliyun.log.es_migration.util import split_and_strip
from aliyun.log.logexception import LogException
from elasticsearch import Elasticsearch
results = []
def log_result(result):
results.append(result)
class MigrationManager(object):
def __init__(self, hosts=None, indexes=None, query=None, scroll="5m", endpoint=None, project_name=None,
access_key_id=None, access_key=None, logstore_index_mappings=None, pool_size=10, time_reference=None,
source=None, topic=None, wait_time_in_secs=60):
"""
:param hosts: required, a comma-separated list of source ES nodes.
(example: "localhost:9200,other_host:9200")
:param indexes: optional, a comma-separated list of source index names.
(default: None, which will pull all indexes. example: "index1,index2")
:param query: optional, used to filter docs, so that you can specify the docs you want to migrate.
(default: None, example: '{"query":{"match":{"es_text":"text1"}}}')
:param scroll: optional, specify how long a consistent view of the index should be
maintained for scrolled search. (default: "5m", example: "10m")
:param endpoint: required, specify the endpoint of your log services.
(example: "cn-beijing.log.aliyuncs.com")
:param project_name: required, specify the project_name of your log services.
:param access_key_id: required, specify the access_key_id of your account.
:param access_key: required, specify the access_key of your account.
:param logstore_index_mappings: optional, specify the mappings of log service logstore and ES index.
(default is one-to-one mapping,
example: '{"logstore1": "my_index*","logstore2": "a_index,b_index"}')
:param pool_size: optional, specify the size of process pool.
The process pool will be used to run collection tasks.
(default: 10, example: 20)
:param time_reference: optional, specify what ES doc's field to use as log's time field.
(default: None, which will use current timestamp as log's time. example: "field1")
:param source: optional, specify the value of log's source field.
(default: None, which will be the value of hosts. example: "your_source")
:param topic: optional, specify the value of log's topic field.
(default: None, example: "your_topic")
:param wait_time_in_secs: optional, specify the waiting time before executing the data migration task after initializing aliyun log.
(default: 60, example: 120)
"""
self.hosts = hosts
self.indexes = indexes
self.query = query
self.scroll = scroll
self.endpoint = endpoint
self.project_name = project_name
self.access_key_id = access_key_id
self.access_key = access_key
self.logstore_index_mappings = logstore_index_mappings
self.pool_size = pool_size
self.time_reference = time_reference
self.source = source
self.topic = topic
self.wait_time_in_secs = wait_time_in_secs
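# A minimal usage sketch (added for illustration; the host, endpoint, project
# and key values below are hypothetical, not part of the original module):
#
#   manager = MigrationManager(
#       hosts="localhost:9200",
#       indexes="index1,index2",
#       endpoint="cn-beijing.log.aliyuncs.com",
#       project_name="my_project",
#       access_key_id="my_access_key_id",
#       access_key="my_access_key")
#   manager.migrate()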
def migrate(self):
es = Elasticsearch(split_and_strip(self.hosts))
log_client = LogClient(self.endpoint, self.access_key_id, self.access_key)
index_lst = self.get_index_lst(es, self.indexes)
index_logstore_mappings = IndexLogstoreMappings(index_lst, self.logstore_index_mappings)
self.init_aliyun_log(es, log_client, self.project_name, index_logstore_mappings, self.wait_time_in_secs)
shard_cnt = self.get_shard_count(es, self.indexes, self.query)
p = Pool(min(shard_cnt, self.pool_size))
for i in range(shard_cnt):
config = CollectionTaskConfig(task_id=i,
slice_id=i,
slice_max=shard_cnt,
hosts=self.hosts,
indexes=self.indexes,
query=self.query,
scroll=self.scroll,
endpoint=self.endpoint,
project=self.project_name,
access_key_id=self.access_key_id,
access_key=self.access_key,
index_logstore_mappings=index_logstore_mappings,
time_reference=self.time_reference,
source=self.source,
topic=self.topic)
p.apply_async(func=run_collection_task, args=(config,), callback=log_result)
p.close()
p.join()
self.logging_summary_info(shard_cnt)
@classmethod
def logging_summary_info(cls, shard_cnt):
total_started_task_cnt = shard_cnt
success_task_cnt = 0
fail_task_cnt = 0
doc_cnt = 0
logging.info("========Tasks Info========")
for res in results:
logging.info(res)
doc_cnt += res.count
if res.status == CollectionTaskStatus.SUCCESS:
success_task_cnt += 1
else:
fail_task_cnt += 1
logging.info("========Summary========")
logging.info("Total started task count: %d", total_started_task_cnt)
logging.info("Successful task count: %d", success_task_cnt)
logging.info("Failed task count: %d", fail_task_cnt)
logging.info("Total collected documentation count: %d", doc_cnt)
@classmethod
def get_shard_count(cls, es, indexes, query=None):
resp = es.count(index=indexes, body=query)
return resp["_shards"]["total"]
@classmethod
def get_index_lst(cls, es, indexes):
resp = es.indices.stats(index=indexes)
return resp["indices"].keys()
@classmethod
def init_aliyun_log(cls, es, log_client, project_name, index_logstore_mappings, wait_time_in_secs):
logging.info("Start to init aliyun log")
cls._create_logstores(log_client, project_name, index_logstore_mappings)
cls._create_index_configs(es, log_client, project_name, index_logstore_mappings)
logging.info("Init aliyun log successfully")
logging.info("Enter wating time, wait_time_in_secs=%d", wait_time_in_secs)
time.sleep(wait_time_in_secs)
logging.info("Exit wating time")
@classmethod
def _create_logstores(cls, log_client, project_name, index_logstore_mappings):
logstores = index_logstore_mappings.get_all_logstores()
for logstore in logstores:
try:
log_client.create_logstore(project_name=project_name, logstore_name=logstore)
except LogException as e:
if e.get_error_code() == "LogStoreAlreadyExist":
continue
else:
raise
@classmethod
def _create_index_configs(cls, es, log_client, project_name, index_logstore_mappings):
logstores = index_logstore_mappings.get_all_logstores()
for logstore in logstores:
indexes = index_logstore_mappings.get_indexes(logstore)
first_index = True
for index in indexes:
resp = es.indices.get(index=index)
for mapping in resp[index]["mappings"].values():
index_config = MappingIndexConverter.to_index_config(mapping)
if first_index:
try:
log_client.create_index(project_name, logstore, index_config)
first_index = False
except LogException as e:
if e.get_error_code() == "IndexAlreadyExist":
continue
else:
raise
else:
log_client.update_index(project_name, logstore, index_config)
| mit | -2,013,282,463,689,481,700 | 45.606383 | 126 | 0.588222 | false |
luoyetx/mxnet | tools/ipynb2md.py | 41 | 2272 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Convert jupyter notebook into the markdown format. The notebook outputs will be
removed.
It is heavily adapted from https://gist.github.com/decabyte/0ed87372774cf5d34d7e
"""
import sys
import io
import os
import argparse
import nbformat
def remove_outputs(nb):
"""Removes the outputs cells for a jupyter notebook."""
for cell in nb.cells:
if cell.cell_type == 'code':
cell.outputs = []
def clear_notebook(old_ipynb, new_ipynb):
with io.open(old_ipynb, 'r') as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
remove_outputs(nb)
with io.open(new_ipynb, 'w', encoding='utf8') as f:
nbformat.write(nb, f, nbformat.NO_CONVERT)
def main():
parser = argparse.ArgumentParser(
description="Jupyter Notebooks to markdown"
)
parser.add_argument("notebook", nargs=1, help="The notebook to be converted.")
parser.add_argument("-o", "--output", help="output markdown file")
args = parser.parse_args()
old_ipynb = args.notebook[0]
new_ipynb = 'tmp.ipynb'
md_file = args.output
print md_file
if not md_file:
md_file = os.path.splitext(old_ipynb)[0] + '.md'
clear_notebook(old_ipynb, new_ipynb)
os.system('jupyter nbconvert ' + new_ipynb + ' --to markdown --output ' + md_file)
with open(md_file, 'a') as f:
f.write('<!-- INSERT SOURCE DOWNLOAD BUTTONS -->')
os.system('rm ' + new_ipynb)
if __name__ == '__main__':
main()
| apache-2.0 | -4,459,009,894,870,522,400 | 28.894737 | 86 | 0.685299 | false |
atpy/atpy | atpy/registry.py | 1 | 6496 | _readers = {}
_writers = {}
_set_readers = {}
_set_writers = {}
_extensions = {}
def register_reader(ttype, function, override=False):
'''
Register a table reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _readers or override:
_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_writer(ttype, function, override=False):
'''
Register a table writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _writers or override:
_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_reader(ttype, function, override=False):
'''
Register a table set reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_readers or override:
_set_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_writer(ttype, function, override=False):
'''
Register a table set writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_writers or override:
_set_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_extensions(ttype, extensions, override=False):
'''
Associate file extensions with a specific table type
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that is used to
specify the table type when reading.
*extensions*: [ string or list or tuple ]
List of valid extensions for the table type - used for auto type
selection. All extensions should be given in lowercase as file
extensions are converted to lowercase before checking against this
list. If a single extension is given, it can be specified as a
string rather than a list of strings
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any extensions if already present.
'''
if type(extensions) == str:
extensions = [extensions]
for extension in extensions:
if not extension in _extensions or override:
_extensions[extension] = ttype
else:
raise Exception("Extension %s is already defined" % extension)
def _determine_type(string, verbose):
if not isinstance(string, basestring):
raise Exception('Could not determine table type (non-string argument)')
s = str(string).lower()
if not '.' in s:
extension = s
else:
extension = s.split('.')[-1]
if extension.lower() in ['gz', 'bz2', 'bzip2']:
extension = s.split('.')[-2]
if extension in _extensions:
table_type = _extensions[extension]
if verbose:
print("Auto-detected table type: %s" % table_type)
else:
raise Exception('Could not determine table type for extension %s' % extension)
return table_type
from . import fitstable
register_reader('fits', fitstable.read)
register_writer('fits', fitstable.write)
register_set_reader('fits', fitstable.read_set)
register_set_writer('fits', fitstable.write_set)
register_extensions('fits', ['fit', 'fits'])
from . import votable
register_reader('vo', votable.read)
register_writer('vo', votable.write)
register_set_reader('vo', votable.read_set)
register_set_writer('vo', votable.write_set)
register_extensions('vo', ['xml', 'vot'])
from . import ipactable
register_reader('ipac', ipactable.read)
register_writer('ipac', ipactable.write)
register_extensions('ipac', ['ipac', 'tbl'])
from . import sqltable
register_reader('sql', sqltable.read)
register_writer('sql', sqltable.write)
register_set_reader('sql', sqltable.read_set)
register_set_writer('sql', sqltable.write_set)
register_extensions('sql', ['sqlite', 'postgres', 'mysql', 'db'])
from . import asciitables
register_reader('cds', asciitables.read_cds)
register_reader('mrt', asciitables.read_cds)
register_reader('latex', asciitables.read_latex)
register_writer('latex', asciitables.write_latex)
register_reader('rdb', asciitables.read_rdb)
register_writer('rdb', asciitables.write_rdb)
register_extensions('rdb', ['rdb'])
register_reader('daophot', asciitables.read_daophot)
register_reader('ascii', asciitables.read_ascii)
register_writer('ascii', asciitables.write_ascii)
from . import hdf5table
register_reader('hdf5', hdf5table.read)
register_set_reader('hdf5', hdf5table.read_set)
register_writer('hdf5', hdf5table.write)
register_set_writer('hdf5', hdf5table.write_set)
register_extensions('hdf5', ['hdf5', 'h5'])
from . import irsa_service
register_reader('irsa', irsa_service.read)
from . import vo_conesearch
register_reader('vo_conesearch', vo_conesearch.read)
from . import htmltable
register_writer('html', htmltable.write)
register_extensions('html', ['html', 'htm'])
| mit | 6,330,407,224,070,350,000 | 27.243478 | 86 | 0.655018 | false |
thomas-sterrenburg/fingerprinting-python | src/static/constants.py | 1 | 1239 | # Copyright 2017 Thomas Sterrenburg
#
# Licensed under the MIT License (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at https://opensource.org/licenses/MIT#
import datetime
# file and directory names
CACHE = 'data/cache'
KNOWN = 'data/known'
REQUESTS = 'data/requests'
BLACKLIST = 'data/blacklist'
# CSV = 'aaa_' + str(datetime.datetime.now()).replace(' ', '_')[:-7] + '.csv'
CSV = 'aaa.csv'
# files to ignore in the requests directories
REQUEST_BLACKLIST = ['.keep', '.DS_Store']
# export csv with server names as columns, instead of hostnames
SERVER_NAMES = False
# failure handler times
PAUSE_TIME_AFTER_TIMEOUT = 1
MAX_ATTEMPTS_PER_HOST = 3
# logger formatting
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
LOGNAME_START = {
'logname': 'setup',
'host_index': 0,
'host_total': 0
}
# fingerprint attribute names
LEXICAL = 'LEXICAL'
SYNTACTIC = 'SYNTACTIC'
SEMANTIC = 'SEMANTIC'
NO_RESPONSE = 'NO_RESPONSE'
NO_RESPONSE_CODE = 'NO_RESPONSE_CODE'
NO_RESPONSE_TEXT = 'NONE'
DATA_LIST = 'LIST'
DATA_NONE = None
# TODO make verbose a possibility again
# TODO make part of arguments list
CSV_VERBOSE = True
EXPORT_CSV = True | mit | 1,184,538,720,387,479,800 | 22.396226 | 77 | 0.702179 | false |
jagguli/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/mutable_list.py | 405 | 10386 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
Note that if _get_single_internal and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
The type of exception to be raised on an invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, long, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, (int, long)):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = ( self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange )
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
def __cmp__(self, other):
'cmp'
slen = len(self)
for i in range(slen):
try:
c = cmp(self[i], other[i])
except IndexError:
# must be other is shorter
return 1
else:
# elements not equal
if c: return c
return cmp(slen, len(other))
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, (int, long)):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=cmp, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(cmp=cmp, key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
temp.sort(cmp=cmp, reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
| apache-2.0 | 683,050,179,241,921,200 | 32.61165 | 92 | 0.568554 | false |
kochbeck/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/parse/__init__.py | 9 | 4491 | # Natural Language Toolkit: Parsers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParserI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduceParser} and C{RecursiveDescentParser}. It also contains
three sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
"""
from api import *
from chart import *
from featurechart import *
from pchart import *
from rd import *
from sr import *
from util import *
from viterbi import *
__all__ = [
# Parser interface
'ParserI',
# Parsers
'RecursiveDescentParser', 'SteppingRecursiveDescentParser',
'ShiftReduceParser', 'SteppingShiftReduceParser',
'EarleyChartParser', 'ChartParser', 'SteppingChartParser',
'BottomUpChartParser', 'InsideChartParser', 'RandomChartParser',
'UnsortedChartParser', 'LongestChartParser', 'ViterbiParser',
'FeatureEarleyChartParser',
]
######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ParseI(ParserI, Deprecated):
"""Use nltk.ParserI instead."""
class AbstractParse(AbstractParser, Deprecated):
"""Use nltk.ParserI instead."""
class RecursiveDescent(RecursiveDescentParser, Deprecated):
"""Use nltk.RecursiveDescentParser instead."""
class SteppingRecursiveDescent(SteppingRecursiveDescentParser, Deprecated):
"""Use nltk.SteppingRecursiveDescentParser instead."""
class ShiftReduce(ShiftReduceParser, Deprecated):
"""Use nltk.ShiftReduceParser instead."""
class SteppingShiftReduce(SteppingShiftReduceParser, Deprecated):
"""Use nltk.SteppingShiftReduceParser instead."""
class EarleyChartParse(EarleyChartParser, Deprecated):
"""Use nltk.EarleyChartParser instead."""
class FeatureEarleyChartParse(FeatureEarleyChartParser, Deprecated):
"""Use nltk.FeatureEarleyChartParser instead."""
class ChartParse(ChartParser, Deprecated):
"""Use nltk.ChartParser instead."""
class SteppingChartParse(SteppingChartParser, Deprecated):
"""Use nltk.SteppingChartParser instead."""
class BottomUpChartParse(BottomUpChartParser, Deprecated):
"""Use nltk.BottomUpChartParser instead."""
class InsideParse(InsideChartParser, Deprecated):
"""Use nltk.InsideChartParser instead."""
class RandomParse(RandomChartParser, Deprecated):
"""Use nltk.RandomChartParser instead."""
class UnsortedParse(UnsortedChartParser, Deprecated):
"""Use nltk.UnsortedChartParser instead."""
class LongestParse(LongestChartParser, Deprecated):
"""Use nltk.LongestChartParser instead."""
class ViterbiParse(ViterbiParser, Deprecated):
"""Use nltk.ViterbiParser instead."""
class GrammarFile(Deprecated):
"""Use nltk.data.load() instead."""
# [xx] had directives: %start, %kimmo, %tagger_file?
def __init__(self, filename=None, verbose=False):
raise ValueError("GrammarFile is no longer supported -- "
"use nltk.data.load() instead.")
| gpl-3.0 | -2,136,103,843,931,177,200 | 40.201835 | 75 | 0.735026 | false |
pbougue/navitia | source/jormungandr/jormungandr/interfaces/v1/Places.py | 2 | 13833 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask_restful import abort
from flask.globals import g
from jormungandr.authentication import get_all_available_instances
from jormungandr.interfaces.v1.decorators import get_serializer
from jormungandr.interfaces.v1.serializer.api import PlacesSerializer, PlacesNearbySerializer
from jormungandr import i_manager, timezone, global_autocomplete, authentication
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.parsers import default_count_arg_type
from copy import deepcopy
from jormungandr.interfaces.v1.transform_id import transform_id
from jormungandr.exceptions import TechnicalError, InvalidArguments
from datetime import datetime
from jormungandr.parking_space_availability.parking_places_manager import ManageParkingPlaces
import ujson as json
from jormungandr.scenarios.utils import places_type
from navitiacommon import parser_args_type
from navitiacommon.parser_args_type import (
TypeSchema,
CoordFormat,
CustomSchemaType,
BooleanType,
OptionValue,
DateTimeFormat,
DepthArgument,
)
from jormungandr.interfaces.common import add_poi_infos_types, handle_poi_infos
import six
class geojson_argument(CustomSchemaType):
def __call__(self, value):
decoded = json.loads(value)
if not decoded:
raise ValueError('invalid shape')
return parser_args_type.geojson_argument(decoded)
def schema(self):
return TypeSchema(type=str) # TODO a better description of the geojson
class Places(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(
self, authentication=False, output_type_serializer=PlacesSerializer, *args, **kwargs
)
self.parsers["get"].add_argument("q", type=six.text_type, required=True, help="The data to search")
self.parsers["get"].add_argument(
"type[]",
type=OptionValue(list(places_type.keys())),
action="append",
default=["stop_area", "address", "poi", "administrative_region"],
help="The type of data to search",
)
self.parsers["get"].add_argument(
"count", type=default_count_arg_type, default=10, help="The maximum number of places returned"
)
self.parsers["get"].add_argument(
"search_type", type=int, default=0, hidden=True, help="Type of search: firstletter or type error"
)
self.parsers["get"].add_argument(
"_main_stop_area_weight_factor",
type=float,
default=1.0,
hidden=True,
help="multiplicator for the weight of main stop area",
)
self.parsers["get"].add_argument(
"admin_uri[]",
type=six.text_type,
action="append",
help="If filled, will restrain the search within the " "given admin uris",
)
self.parsers["get"].add_argument("depth", type=DepthArgument(), default=1, help="The depth of objects")
self.parsers["get"].add_argument(
"_current_datetime",
type=DateTimeFormat(),
schema_metadata={'default': 'now'},
hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.',
)
self.parsers['get'].add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
self.parsers['get'].add_argument(
"from",
type=CoordFormat(nullable=True),
help="Coordinates longitude;latitude used to prioritize " "the objects around this coordinate",
)
self.parsers['get'].add_argument(
"_autocomplete",
type=six.text_type,
hidden=True,
help="name of the autocomplete service, used under the hood",
)
self.parsers['get'].add_argument(
'shape', type=geojson_argument(), help='Geographical shape to limit the search.'
)
def get(self, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
self._register_interpreted_parameters(args)
if len(args['q']) == 0:
abort(400, message="Search word absent")
if args['disable_geojson']:
g.disable_geojson = True
user = authentication.get_user(token=authentication.get_token(), abort_if_no_token=False)
if args['shape'] is None and user and user.shape:
args['shape'] = json.loads(user.shape)
if user and user.default_coord:
if args['from'] is None:
args['from'] = CoordFormat()(user.default_coord)
else:
if args['from'] == '':
raise InvalidArguments("if 'from' is provided it cannot be null")
# If a region or coords are given, we do the search within
# that region; otherwise we do a world-wide search
if any([region, lon, lat]):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
response = i_manager.dispatch(args, "places", instance_name=self.region)
else:
available_instances = get_all_available_instances(user)
autocomplete = global_autocomplete.get('bragi')
if not autocomplete:
raise TechnicalError('world wide autocompletion service not available')
response = autocomplete.get(args, instances=available_instances)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
class PlaceUri(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(
self, authentication=False, output_type_serializer=PlacesSerializer, *args, **kwargs
)
self.parsers["get"].add_argument(
"bss_stands",
type=BooleanType(),
default=False,
deprecated=True,
help="DEPRECATED, Use add_poi_infos[]=bss_stands",
)
self.parsers["get"].add_argument(
"add_poi_infos[]",
type=OptionValue(add_poi_infos_types),
default=['bss_stands', 'car_park'],
dest="add_poi_infos",
action="append",
help="Show more information about the poi if it's available, for instance, "
"show BSS/car park availability in the pois(BSS/car park) of the response",
)
self.parsers['get'].add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
self.parsers['get'].add_argument(
"disable_disruption", type=BooleanType(), default=False, help="remove disruptions from the response"
)
args = self.parsers["get"].parse_args()
if handle_poi_infos(args["add_poi_infos"], args["bss_stands"]):
self.get_decorators.insert(1, ManageParkingPlaces(self, 'places'))
if args['disable_geojson']:
g.disable_geojson = True
self.parsers['get'].add_argument(
"_autocomplete",
type=six.text_type,
hidden=True,
help="name of the autocomplete service, used under the hood",
)
def get(self, id, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args.update({"uri": transform_id(id), "_current_datetime": datetime.utcnow()})
if any([region, lon, lat]):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
response = i_manager.dispatch(args, "place_uri", instance_name=self.region)
else:
user = authentication.get_user(token=authentication.get_token(), abort_if_no_token=False)
available_instances = get_all_available_instances(user)
autocomplete = global_autocomplete.get('bragi')
if not autocomplete:
raise TechnicalError('world wide autocompletion service not available')
response = autocomplete.get_by_uri(args["uri"], instances=available_instances)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
places_types = {
'stop_areas',
'stop_points',
'pois',
'addresses',
'coords',
'places',
'coord',
} # add admins when possible
class PlacesNearby(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(self, output_type_serializer=PlacesNearbySerializer, *args, **kwargs)
parser_get = self.parsers["get"]
parser_get.add_argument(
"type[]",
type=OptionValue(list(places_type.keys())),
action="append",
default=["stop_area", "stop_point", "poi"],
help="Type of the objects to return",
)
parser_get.add_argument("filter", type=six.text_type, default="", help="Filter your objects")
parser_get.add_argument("distance", type=int, default=500, help="Distance range of the query in meters")
parser_get.add_argument("count", type=default_count_arg_type, default=10, help="Elements per page")
parser_get.add_argument("depth", type=DepthArgument(), default=1, help="Maximum depth on objects")
parser_get.add_argument("start_page", type=int, default=0, help="The page number of the ptref result")
parser_get.add_argument(
"bss_stands",
type=BooleanType(),
default=False,
deprecated=True,
help="DEPRECATED, Use add_poi_infos[]=bss_stands",
)
parser_get.add_argument(
"add_poi_infos[]",
type=OptionValue(add_poi_infos_types),
default=['bss_stands', 'car_park'],
dest="add_poi_infos",
action="append",
help="Show more information about the poi if it's available, for instance, "
"show BSS/car park availability in the pois(BSS/car park) of the response",
)
parser_get.add_argument(
"_current_datetime",
type=DateTimeFormat(),
schema_metadata={'default': 'now'},
hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.',
)
parser_get.add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
parser_get.add_argument(
"disable_disruption", type=BooleanType(), default=False, help="remove disruptions from the response"
)
args = parser_get.parse_args()
if handle_poi_infos(args["add_poi_infos"], args["bss_stands"]):
self.get_decorators.insert(1, ManageParkingPlaces(self, 'places_nearby'))
@get_serializer(serpy=PlacesNearbySerializer)
def get(self, region=None, lon=None, lat=None, uri=None):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
args = self.parsers["get"].parse_args()
if args['disable_geojson']:
g.disable_geojson = True
if uri:
if uri[-1] == '/':
uri = uri[:-1]
uris = uri.split("/")
if len(uris) >= 2:
args["uri"] = transform_id(uris[-1])
# for coherence we check the type of the object
obj_type = uris[-2]
if obj_type not in places_types:
abort(404, message='places_nearby api not available for {}'.format(obj_type))
else:
abort(404)
elif lon and lat:
# check if lon and lat can be converted to float
float(lon)
float(lat)
args["uri"] = "coord:{}:{}".format(lon, lat)
else:
abort(404)
args["filter"] = args["filter"].replace(".id", ".uri")
self._register_interpreted_parameters(args)
response = i_manager.dispatch(args, "places_nearby", instance_name=self.region)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
| agpl-3.0 | -7,189,060,706,409,084,000 | 40.169643 | 112 | 0.619388 | false |
achabotl/pambox | setup.py | 1 | 3387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import codecs
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.rst')
def check_dependencies():
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
return
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
raise ImportError("pambox requires numpy")
try:
import scipy
except ImportError:
raise ImportError("pambox requires scipy")
try:
import matplotlib
except ImportError:
raise ImportError("pambox requires matplotlib")
try:
import pandas
except ImportError:
raise ImportError("pambox requires pandas")
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--runslow', 'pambox/tests']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
if __name__ == '__main__':
import sys
if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean'))):
check_dependencies()
setup(
name='pambox',
description='A Python toolbox for auditory modeling',
author='Alexandre Chabot-Leclerc',
author_email='[email protected]',
version=find_version('pambox', '__init__.py'),
url='https://bitbucket.org/achabotl/pambox',
license='Modified BSD License',
tests_require=['pytest'],
install_requires=[
'six>=1.4.1',
],
cmdclass={'test': PyTest},
long_description=long_description,
packages=['pambox'],
include_package_data=True,
platforms='any',
test_suite='pambox.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
extras_require={
'testing': ['pytest']
}
)
| bsd-3-clause | 6,608,040,403,684,827,000 | 28.198276 | 71 | 0.588131 | false |
vipulroxx/kivy | examples/demo/multistroke/historymanager.py | 38 | 9447 | __all__ = ('GestureHistoryManager', 'GestureVisualizer')
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.graphics import Color, Line, Rectangle
from kivy.properties import ObjectProperty, BooleanProperty, NumericProperty
from kivy.compat import PY2
# local libraries
from helpers import InformationPopup
from settings import MultistrokeSettingsContainer
# refuse heap permute for gestures with more strokes than 3
# (you can increase it, but 4 strokes = 384 templates, 5 = 3840)
MAX_PERMUTE_STROKES = 3
Builder.load_file('historymanager.kv')
class GestureHistoryManager(GridLayout):
selected = ObjectProperty(None, allownone=True)
def __init__(self, **kwargs):
super(GestureHistoryManager, self).__init__(**kwargs)
self.gesturesettingsform = GestureSettingsForm()
rr = self.gesturesettingsform.rrdetails
rr.bind(on_reanalyze_selected=self.reanalyze_selected)
self.infopopup = InformationPopup()
self.recognizer = App.get_running_app().recognizer
def reanalyze_selected(self, *l):
# recognize() can block the UI with max_gpf=100, show a message
self.infopopup.text = 'Please wait, analyzing ..'
self.infopopup.auto_dismiss = False
self.infopopup.open()
# Get a reference to the original GestureContainer object
gesture_obj = self.selected._result_obj._gesture_obj
# Reanalyze the candidate strokes using current database
res = self.recognizer.recognize(gesture_obj.get_vectors(),
max_gpf=100)
# Tag the result with the gesture object (it didn't change)
res._gesture_obj = gesture_obj
# Tag the selected item with the updated ProgressTracker
self.selected._result_obj = res
res.bind(on_complete=self._reanalyze_complete)
def _reanalyze_complete(self, *l):
self.gesturesettingsform.load_visualizer(self.selected)
self.infopopup.dismiss()
def add_selected_to_database(self, *l):
if self.selected is None:
raise Exception('add_gesture_to_database before load_visualizer?')
if self.gesturesettingsform.addsettings is None:
raise Exception('add_gesture_to_database missing addsetings?')
ids = self.gesturesettingsform.addsettings.ids
name = ids.name.value.strip()
if name == '':
self.infopopup.auto_dismiss = True
self.infopopup.text = 'You must specify a name for the gesture'
self.infopopup.open()
return
permute = ids.permute.value
sensitive = ids.orientation_sens.value
strokelen = ids.stroke_sens.value
angle_sim = ids.angle_sim.value
cand = self.selected._result_obj._gesture_obj.get_vectors()
if permute and len(cand) > MAX_PERMUTE_STROKES:
t = "Can't heap permute %d-stroke gesture " % (len(cand))
self.infopopup.text = t
self.infopopup.auto_dismiss = True
self.infopopup.open()
return
self.recognizer.add_gesture(
name,
cand,
use_strokelen=strokelen,
orientation_sensitive=sensitive,
angle_similarity=angle_sim,
permute=permute)
self.infopopup.text = 'Gesture added to database'
self.infopopup.auto_dismiss = True
self.infopopup.open()
def clear_history(self, *l):
if self.selected:
self.visualizer_deselect()
self.ids.history.clear_widgets()
def visualizer_select(self, visualizer, *l):
if self.selected is not None:
self.selected.selected = False
else:
self.add_widget(self.gesturesettingsform)
self.gesturesettingsform.load_visualizer(visualizer)
self.selected = visualizer
def visualizer_deselect(self, *l):
self.selected = None
self.remove_widget(self.gesturesettingsform)
def add_recognizer_result(self, result, *l):
'''The result object is a ProgressTracker with additional
data; in main.py it is tagged with the original GestureContainer
that was analyzed (._gesture_obj)'''
# Create a GestureVisualizer that draws the gesture on canvas
visualizer = GestureVisualizer(result._gesture_obj,
size_hint=(None, None), size=(150, 150))
# Tag it with the result object so AddGestureForm.load_visualizer
# has the results to build labels in the scrollview
visualizer._result_obj = result
visualizer.bind(on_select=self.visualizer_select)
visualizer.bind(on_deselect=self.visualizer_deselect)
# Add the visualizer to the list of gestures in 'history' screen
self.ids.history.add_widget(visualizer)
self._trigger_layout()
self.ids.scrollview.update_from_scroll()
class RecognizerResultLabel(Label):
'''This Label subclass is used to show a single result from the
gesture matching process (is a child of GestureHistoryManager)'''
pass
class RecognizerResultDetails(BoxLayout):
'''Contains a ScrollView of RecognizerResultLabels, ie the list of
matched gestures and their score/distance (is a child of
GestureHistoryManager)'''
def __init__(self, **kwargs):
super(RecognizerResultDetails, self).__init__(**kwargs)
self.register_event_type('on_reanalyze_selected')
def on_reanalyze_selected(self, *l):
pass
class AddGestureSettings(MultistrokeSettingsContainer):
pass
class GestureSettingsForm(BoxLayout):
'''This is the main content of the GestureHistoryManager, the form for
adding a new gesture to the recognizer. It is added to the widget tree
when a GestureVisualizer is selected.'''
def __init__(self, **kwargs):
super(GestureSettingsForm, self).__init__(**kwargs)
self.infopopup = InformationPopup()
self.rrdetails = RecognizerResultDetails()
self.addsettings = None
self.app = App.get_running_app()
def load_visualizer(self, visualizer):
if self.addsettings is None:
self.addsettings = AddGestureSettings()
self.ids.settings.add_widget(self.addsettings)
self.visualizer = visualizer
analysis = self.ids.analysis
analysis.clear_widgets()
analysis.add_widget(self.rrdetails)
scrollv = self.rrdetails.ids.result_scrollview
resultlist = self.rrdetails.ids.result_list
resultlist.clear_widgets()
r = visualizer._result_obj.results
if not len(r):
lbl = RecognizerResultLabel(text='[b]No match[/b]')
resultlist.add_widget(lbl)
scrollv.scroll_y = 1
return
if PY2:
d = r.iteritems
else:
d = r.items
for one in sorted(d(), key=lambda x: x[1]['score'],
reverse=True):
data = one[1]
lbl = RecognizerResultLabel(
text='Name: [b]' + data['name'] + '[/b]' +
'\n Score: ' + str(data['score']) +
'\n Distance: ' + str(data['dist']))
resultlist.add_widget(lbl)
# Make sure the top is visible
scrollv.scroll_y = 1
class GestureVisualizer(Widget):
selected = BooleanProperty(False)
def __init__(self, gesturecontainer, **kwargs):
super(GestureVisualizer, self).__init__(**kwargs)
self._gesture_container = gesturecontainer
self._trigger_draw = Clock.create_trigger(self._draw_item, 0)
self.bind(pos=self._trigger_draw, size=self._trigger_draw)
self._trigger_draw()
self.register_event_type('on_select')
self.register_event_type('on_deselect')
def on_touch_down(self, touch):
if not self.collide_point(touch.x, touch.y):
return
self.selected = not self.selected
self.dispatch(self.selected and 'on_select' or 'on_deselect')
# FIXME: This seems inefficient, is there a better way??
def _draw_item(self, dt):
g = self._gesture_container
bb = g.bbox
minx, miny, maxx, maxy = bb['minx'], bb['miny'], bb['maxx'], bb['maxy']
width, height = self.size
xpos, ypos = self.pos
if g.height > g.width:
to_self = (height * 0.85) / g.height
else:
to_self = (width * 0.85) / g.width
self.canvas.remove_group('gesture')
cand = g.get_vectors()
col = g.color
for stroke in cand:
out = []
append = out.append
for vec in stroke:
x, y = vec
x = (x - minx) * to_self
w = (maxx - minx) * to_self
append(x + xpos + (width - w) * .85 / 2)
y = (y - miny) * to_self
h = (maxy - miny) * to_self
append(y + ypos + (height - h) * .85 / 2)
with self.canvas:
Color(col[0], col[1], col[2], mode='rgb')
Line(points=out, group='gesture', width=2)
def on_select(self, *l):
pass
def on_deselect(self, *l):
pass
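    # Illustrative consumer-side sketch (not part of the original demo; the
    # manager name below is an assumption): a GestureVisualizer is created per
    # analyzed gesture and its custom events are bound to handlers, e.g.
    #
    #   vis = GestureVisualizer(gesture_container, size_hint=(None, None),
    #                           size=(150, 150))
    #   vis.bind(on_select=history_manager.visualizer_select,
    #            on_deselect=history_manager.visualizer_deselect)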
| mit | -9,166,076,485,671,849,000 | 33.104693 | 79 | 0.618397 | false |
aviciimaxwell/odoo | addons/hr_timesheet_invoice/wizard/hr_timesheet_final_invoice_create.py | 337 | 3000 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
#
# Create an final invoice based on selected timesheet lines
#
#
# TODO: check unit of measure !!!
#
class final_invoice_create(osv.osv_memory):
_name = 'hr.timesheet.invoice.create.final'
_description = 'Create invoice from timesheet final'
_columns = {
'date': fields.boolean('Date', help='Display date in the history of works'),
'time': fields.boolean('Time Spent', help='Display time in the history of works'),
'name': fields.boolean('Log of Activity', help='Display detail of work in the invoice line.'),
'price': fields.boolean('Cost', help='Display cost of the item you reinvoice'),
'product': fields.many2one('product.product', 'Product', help='The product that will be used to invoice the remaining amount'),
}
def do_create(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
# hack for fixing small issue (context should not propagate implicitly between actions)
if 'default_type' in context:
del context['default_type']
ids = self.pool.get('account.analytic.line').search(cr, uid, [('invoice_id','=',False),('to_invoice','<>', False), ('account_id', 'in', context['active_ids'])], context=context)
invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, ids, data, context=context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)[0]
res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)['res_id']
act_win = act_obj.read(cr, uid, [res_id], context=context)[0]
act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
act_win['name'] = _('Invoices')
return act_win
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,851,024,442,269,922,000 | 47.387097 | 185 | 0.629333 | false |
hmendozap/auto-sklearn | autosklearn/metalearning/metafeatures/plot_metafeatures.py | 1 | 20297 | from __future__ import print_function
import argparse
import cPickle
import itertools
import os
import StringIO
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
try:
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
import sklearn.metrics.pairwise
except:
print("Failed to load TSNE, probably you're using sklearn 0.14.X")
from pyMetaLearn.metalearning.meta_base import MetaBase
import pyMetaLearn.metalearning.create_datasets
import pyMetaLearn.data_repositories.openml.apiconnector
def load_dataset(dataset, dataset_directory):
dataset_dir = os.path.abspath(os.path.join(dataset_directory, dataset))
fh = open(os.path.join(dataset_dir, dataset + ".pkl"))
ds = cPickle.load(fh)
fh.close()
data_frame = ds.convert_arff_structure_to_pandas(ds
.get_unprocessed_files())
class_ = data_frame.keys()[-1]
attributes = data_frame.keys()[0:-1]
X = data_frame[attributes]
Y = data_frame[class_]
return X, Y
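# Hedged usage sketch (the dataset name and directory below are made up):
#
#   X, Y = load_dataset("anneal", "/path/to/dataset_directory")
#   # X: pandas DataFrame of attribute columns, Y: Series with the class column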
def plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeature_times,
                      runs, method='pca', seed=1, depth=1, distance='l2'):
"""Project datasets in a 2d space and plot them.
arguments:
* metafeatures_plot_dir: a directory to save the generated plots
      * metafeatures: a pandas Dataframe from the MetaBase
      * metafeature_times: a pandas Dataframe of metafeature calculation times
      * runs: a dictionary of runs from the MetaBase
      * method: either pca or t-sne
      * seed: only used for t-sne
      * depth: if 1, a one-step look-ahead is performed
      * distance: 'l1', 'l2' or 'runs'; the distance measure used for t-sne
"""
if type(metafeatures) != pd.DataFrame:
raise ValueError("Argument metafeatures must be of type pd.Dataframe "
"but is %s" % str(type(metafeatures)))
############################################################################
# Write out the datasets and their size as a TEX table
# TODO put this in an own function
dataset_tex = StringIO.StringIO()
dataset_tex.write('\\begin{tabular}{lrrr}\n')
dataset_tex.write('\\textbf{Dataset name} & '
'\\textbf{\#features} & '
'\\textbf{\#patterns} & '
'\\textbf{\#classes} \\\\\n')
num_features = []
num_instances = []
num_classes = []
for dataset in sorted(metafeatures.index):
dataset_tex.write('%s & %d & %d & %d \\\\\n' % (
dataset.replace('larochelle_etal_2007_', '').replace(
'_', '-'),
metafeatures.loc[dataset]['number_of_features'],
metafeatures.loc[dataset]['number_of_instances'],
metafeatures.loc[dataset]['number_of_classes']))
num_features.append(metafeatures.loc[dataset]['number_of_features'])
num_instances.append(metafeatures.loc[dataset]['number_of_instances'])
num_classes.append(metafeatures.loc[dataset]['number_of_classes'])
dataset_tex.write('Minimum & %.1f & %.1f & %.1f \\\\\n' %
(np.min(num_features), np.min(num_instances), np.min(num_classes)))
dataset_tex.write('Maximum & %.1f & %.1f & %.1f \\\\\n' %
(np.max(num_features), np.max(num_instances), np.max(num_classes)))
dataset_tex.write('Mean & %.1f & %.1f & %.1f \\\\\n' %
(np.mean(num_features), np.mean(num_instances), np.mean(num_classes)))
dataset_tex.write('10\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 10), np.percentile(num_instances, 10),
np.percentile(num_classes, 10)))
dataset_tex.write('90\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 90), np.percentile(num_instances, 90),
np.percentile(num_classes, 90)))
dataset_tex.write('median & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 50), np.percentile(num_instances, 50),
np.percentile(num_classes, 50)))
dataset_tex.write('\\end{tabular}')
dataset_tex.seek(0)
dataset_tex_output = os.path.join(metafeatures_plot_dir, 'datasets.tex')
with open(dataset_tex_output, 'w') as fh:
fh.write(dataset_tex.getvalue())
############################################################################
# Write out a list of metafeatures, each with the min/max/mean
# calculation time and the min/max/mean value
metafeatures_tex = StringIO.StringIO()
metafeatures_tex.write('\\begin{tabular}{lrrrrrr}\n')
metafeatures_tex.write('\\textbf{Metafeature} & '
'\\textbf{Minimum} & '
'\\textbf{Mean} & '
'\\textbf{Maximum} &'
'\\textbf{Minimum time} &'
'\\textbf{Mean time} &'
'\\textbf{Maximum time} '
'\\\\\n')
for mf_name in sorted(metafeatures.columns):
metafeatures_tex.write('%s & %.2f & %.2f & %.2f & %.2f & %.2f & %.2f \\\\\n'
% (mf_name.replace('_', '-'),
metafeatures.loc[:,mf_name].min(),
metafeatures.loc[:,mf_name].mean(),
metafeatures.loc[:,mf_name].max(),
metafeature_times.loc[:, mf_name].min(),
metafeature_times.loc[:, mf_name].mean(),
metafeature_times.loc[:, mf_name].max()))
metafeatures_tex.write('\\end{tabular}')
metafeatures_tex.seek(0)
metafeatures_tex_output = os.path.join(metafeatures_plot_dir, 'metafeatures.tex')
with open(metafeatures_tex_output, 'w') as fh:
fh.write(metafeatures_tex.getvalue())
# Without this scaling the transformation for visualization purposes is
# useless
metafeatures = metafeatures.copy()
X_min = np.nanmin(metafeatures, axis=0)
X_max = np.nanmax(metafeatures, axis=0)
metafeatures = (metafeatures - X_min) / (X_max - X_min)
# PCA
if method == 'pca':
pca = PCA(2)
transformation = pca.fit_transform(metafeatures.values)
elif method == 't-sne':
if distance == 'l2':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l2')
elif distance == 'l1':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l1')
elif distance == 'runs':
names_to_indices = dict()
for metafeature in metafeatures.index:
idx = len(names_to_indices)
names_to_indices[metafeature] = idx
X, Y = pyMetaLearn.metalearning.create_datasets\
.create_predict_spearman_rank(metafeatures, runs,
'combination')
# Make a metric matrix out of Y
distance_matrix = np.zeros((metafeatures.shape[0],
metafeatures.shape[0]), dtype=np.float64)
for idx in Y.index:
dataset_names = idx.split("_")
d1 = names_to_indices[dataset_names[0]]
d2 = names_to_indices[dataset_names[1]]
distance_matrix[d1][d2] = Y.loc[idx]
distance_matrix[d2][d1] = Y.loc[idx]
else:
raise NotImplementedError()
# For whatever reason, tsne doesn't accept l1 metric
tsne = TSNE(random_state=seed, perplexity=50, verbose=1)
transformation = tsne.fit_transform(distance_matrix)
# Transform the transformation back to range [0, 1] to ease plotting
transformation_min = np.nanmin(transformation, axis=0)
transformation_max = np.nanmax(transformation, axis=0)
transformation = (transformation - transformation_min) / \
(transformation_max - transformation_min)
print(transformation_min, transformation_max)
#for i, dataset in enumerate(directory_content):
# print dataset, meta_feature_array[i]
fig = plt.figure(dpi=600, figsize=(12, 12))
ax = plt.subplot(111)
# The dataset names must be aligned at the borders of the plot in a way
# the arrows don't cross each other. First, define the different slots
# where the labels will be positioned and then figure out the optimal
# order of the labels
slots = []
# 25 datasets on the top y-axis
slots.extend([(-0.1 + 0.05 * i, 1.1) for i in range(25)])
# 24 datasets on the right x-axis
slots.extend([(1.1, 1.05 - 0.05 * i) for i in range(24)])
# 25 datasets on the bottom y-axis
slots.extend([(-0.1 + 0.05 * i, -0.1) for i in range(25)])
# 24 datasets on the left x-axis
slots.extend([(-0.1, 1.05 - 0.05 * i) for i in range(24)])
# Align the labels on the outer axis
labels_top = []
labels_left = []
labels_right = []
labels_bottom = []
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
# Although all plot area goes up to 1.1, 1.1, the range of all the
# points lies inside [0,1]
if x >= y and x < 1.0 - y:
labels_bottom.append((x, label))
elif x >= y and x >= 1.0 - y:
labels_right.append((y, label))
        elif y > x and x <= 1.0 - y:
labels_left.append((y, label))
else:
labels_top.append((x, label))
# Sort the labels according to their alignment
labels_bottom.sort()
labels_left.sort()
labels_left.reverse()
labels_right.sort()
labels_right.reverse()
labels_top.sort()
# Build an index label -> x, y
points = {}
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
points[label] = (x, y)
# Find out the final positions...
positions_top = {}
positions_left = {}
positions_right = {}
positions_bottom = {}
# Find the actual positions
for i, values in enumerate(labels_bottom):
y, label = values
margin = 1.2 / len(labels_bottom)
positions_bottom[label] = (-0.05 + i * margin, -0.1,)
for i, values in enumerate(labels_left):
x, label = values
margin = 1.2 / len(labels_left)
positions_left[label] = (-0.1, 1.1 - i * margin)
for i, values in enumerate(labels_top):
y, label = values
margin = 1.2 / len(labels_top)
positions_top[label] = (-0.05 + i * margin, 1.1)
for i, values in enumerate(labels_right):
y, label = values
margin = 1.2 / len(labels_right)
positions_right[label] = (1.1, 1.05 - i * margin)
# Do greedy resorting if it decreases the number of intersections...
def resort(label_positions, marker_positions, maxdepth=1):
# TODO: are the inputs dicts or lists
# TODO: two-step look-ahead
def intersect(start1, end1, start2, end2):
# Compute if there is an intersection, for the algorithm see
# Computer Graphics by F.S.Hill
# If one vector is just a point, it cannot intersect with a line...
for v in [start1, start2, end1, end2]:
if not np.isfinite(v).all():
return False # Obviously there is no intersection
def perpendicular(d):
return np.array((-d[1], d[0]))
d1 = end1 - start1 # denoted b
d2 = end2 - start2 # denoted d
d2_1 = start2 - start1 # denoted c
d1_perp = perpendicular(d1) # denoted by b_perp
d2_perp = perpendicular(d2) # denoted by d_perp
t = np.dot(d2_1, d2_perp) / np.dot(d1, d2_perp)
u = - np.dot(d2_1, d1_perp) / np.dot(d2, d1_perp)
if 0 <= t <= 1 and 0 <= u <= 1:
return True # There is an intersection
else:
return False # There is no intersection
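        # Worked example (illustrative, not from the original code): the two
        # unit-square diagonals, start1=(0, 0), end1=(1, 1) and start2=(0, 1),
        # end2=(1, 0), give t = u = 0.5, so intersect() returns True; segments
        # whose denominators vanish (parallel lines) yield non-finite t/u and
        # are treated as non-intersecting.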
def number_of_intersections(label_positions, marker_positions):
num = 0
for key1, key2 in itertools.permutations(label_positions, r=2):
s1 = np.array(label_positions[key1])
e1 = np.array(marker_positions[key1])
s2 = np.array(label_positions[key2])
e2 = np.array(marker_positions[key2])
if intersect(s1, e1, s2, e2):
num += 1
return num
# test if swapping two lines would decrease the number of intersections
# TODO: if this was done with a datastructure different than dicts,
# it could be much faster, because there is a lot of redundant
# computing performed in the second iteration
def swap(label_positions, marker_positions, depth=0,
maxdepth=maxdepth, best_found=sys.maxint):
if len(label_positions) <= 1:
return
two_step_look_ahead = False
while True:
improvement = False
for key1, key2 in itertools.combinations(label_positions, r=2):
before = number_of_intersections(label_positions, marker_positions)
# swap:
tmp = label_positions[key1]
label_positions[key1] = label_positions[key2]
label_positions[key2] = tmp
if depth < maxdepth and two_step_look_ahead:
swap(label_positions, marker_positions,
depth=depth+1, best_found=before)
after = number_of_intersections(label_positions, marker_positions)
if best_found > after and before > after:
improvement = True
print(before, after)
print("Depth %d: Swapped %s with %s" %
(depth, key1, key2))
else: # swap back...
tmp = label_positions[key1]
label_positions[key1] = label_positions[key2]
label_positions[key2] = tmp
if after == 0:
break
# If it is not yet sorted perfectly, do another pass with
# two-step lookahead
if before == 0:
print("Sorted perfectly...")
break
print(depth, two_step_look_ahead)
if two_step_look_ahead:
break
if maxdepth == depth:
print("Reached maximum recursion depth...")
break
if not improvement and depth < maxdepth:
print("Still %d errors, trying two-step lookahead" % before)
two_step_look_ahead = True
swap(label_positions, marker_positions, maxdepth=maxdepth)
resort(positions_bottom, points, maxdepth=depth)
resort(positions_left, points, maxdepth=depth)
resort(positions_right, points, maxdepth=depth)
resort(positions_top, points, maxdepth=depth)
# Helper function
def plot(x, y, label_x, label_y, label, ha, va, relpos, rotation=0):
ax.scatter(x, y, marker='o', label=label, s=80, linewidths=0.1,
color='blue', edgecolor='black')
label = label.replace('larochelle_etal_2007_', '')
x = ax.annotate(label, xy=(x, y), xytext=(label_x, label_y),
ha=ha, va=va, rotation=rotation,
bbox=dict(boxstyle='round', fc='gray', alpha=0.5),
arrowprops=dict(arrowstyle='->', color='black',
relpos=relpos))
# Do the plotting
for i, key in enumerate(positions_bottom):
x, y = positions_bottom[key]
plot(points[key][0], points[key][1], x, y,
key, ha='right', va='top', rotation=45, relpos=(1, 1))
for i, key in enumerate(positions_left):
x, y = positions_left[key]
plot(points[key][0], points[key][1], x, y, key,
ha='right', va='top', rotation=45, relpos=(1, 1))
for i, key in enumerate(positions_top):
x, y = positions_top[key]
plot(points[key][0], points[key][1], x, y, key,
ha='left', va='bottom', rotation=45, relpos=(0, 0))
for i, key in enumerate(positions_right):
x, y = positions_right[key]
plot(points[key][0], points[key][1], x, y, key,
ha='left', va='bottom', rotation=45, relpos=(0, 0))
# Resize everything
box = ax.get_position()
remove = 0.05 * box.width
ax.set_position([box.x0 + remove, box.y0 + remove,
box.width - remove*2, box.height - remove*2])
locs_x = ax.get_xticks()
locs_y = ax.get_yticks()
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xlim((-0.1, 1.1))
ax.set_ylim((-0.1, 1.1))
plt.savefig(os.path.join(metafeatures_plot_dir, "pca.png"))
plt.savefig(os.path.join(metafeatures_plot_dir, "pca.pdf"))
plt.clf()
# Relation of features to each other...
#correlations = []
#for mf_1, mf_2 in itertools.combinations(metafeatures.columns, 2):
# x = metafeatures.loc[:, mf_1]
# y = metafeatures.loc[:, mf_2]
# rho, p = scipy.stats.spearmanr(x, y)
# correlations.append((rho, "%s-%s" % (mf_1, mf_2)))
# plt.figure()
# plt.plot(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
# plt.plot(x, y, "x")
# plt.xlabel(mf_1)
# plt.ylabel(mf_2)
# plt.xlim((0, 1))
# plt.ylim((0, 1))
# plt.savefig(os.path.join(target_directory, mf_1 + "__" + mf_2 + "
# .png"))
# plt.close()
#correlations.sort()
#for cor in correlations:
#print cor
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tasks", required=True, type=str)
parser.add_argument("--runs", type=str)
parser.add_argument("experiment_directory", type=str)
parser.add_argument("-m", "--method", default='pca',
choices=['pca', 't-sne'],
help="Dimensionality reduction method")
parser.add_argument("--distance", choices=[None, 'l1', 'l2', 'runs'],
default='l2')
parser.add_argument("-s", "--seed", default=1, type=int)
parser.add_argument("-d", "--depth", default=0, type=int)
parser.add_argument("--subset", default='all', choices=['all', 'pfahringer_2000_experiment1'])
args = parser.parse_args()
with open(args.tasks) as fh:
task_files_list = fh.readlines()
# Load all the experiment run data only if needed
if args.distance == 'runs':
with open(args.runs) as fh:
experiments_file_list = fh.readlines()
else:
experiments_file_list = StringIO.StringIO()
for i in range(len(task_files_list)):
experiments_file_list.write("\n")
experiments_file_list.seek(0)
pyMetaLearn.data_repositories.openml.apiconnector.set_local_directory(
args.experiment_directory)
meta_base = MetaBase(task_files_list, experiments_file_list)
metafeatures = meta_base.get_all_metafeatures_as_pandas(
metafeature_subset=args.subset)
metafeature_times = meta_base.get_all_metafeatures_times_as_pandas(
metafeature_subset=args.subset)
#if args.subset:
# metafeatures = metafeatures.loc[:,subsets[args.subset]]
# metafeature_times = metafeature_times.loc[:,subsets[args.subset]]
runs = meta_base.get_all_runs()
general_plot_directory = os.path.join(args.experiment_directory, "plots")
try:
os.mkdir(general_plot_directory)
except:
pass
metafeatures_plot_dir = os.path.join(general_plot_directory, "metafeatures")
try:
os.mkdir(metafeatures_plot_dir)
except:
pass
plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeature_times,
runs, method=args.method, seed=args.seed,
depth=args.depth, distance=args.distance)
| bsd-3-clause | -632,687,654,449,007,400 | 40.00404 | 98 | 0.560773 | false |
rahlk/Experimental-Algorithms | multiProc/src/parGALE.py | 1 | 4233 | """
"""
from __future__ import print_function, division
import os
from demo import *
import subprocess
import sys
sys.path.append(os.path.abspath('../problems/'))
# Get the git root directory
root = repo_dir = subprocess.Popen(
    ['git', 'rev-parse', '--show-toplevel'],
    stdout=subprocess.PIPE).communicate()[0].rstrip()
sys.path.append(root)
from pdb import set_trace
from dtlz2 import DTLZ2
from multiprocessing import Pool
from random import seed as rseed, randint as randi
import numpy as np
from time import time
from tools.quality import measure
def gale0(model=DTLZ2(n_dec=30,n_obj=3), new=[], pop=int(1e4)):
"""
Recursive FASTMAP clustering.
"""
if len(new)==0:
frontier = model.generate(pop)
else:
frontier=new
frontier.extend(model.generate(pop-len(new)))
N = np.shape(frontier)[0]
leaf = []
norm = np.max(frontier, axis=0) - np.min(frontier, axis=0)
def cdom(x, y, better=['less','less','less']):
def loss1(i,x,y):
return (x - y) if better[i] == 'less' else (y - x)
def expLoss(i,x,y,n):
return np.exp(loss1(i,x,y) / n)
def loss(x, y):
n = min(len(x), len(y)) #lengths should be equal
losses = [expLoss(i,xi,yi,n) for i, (xi, yi) in enumerate(zip(x,y))]
return sum(losses)/n
"x dominates y if it losses least"
return loss(x,y) < loss(y,x)
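  # Illustrative check (not in the original source): with the default
  # better=['less', 'less', 'less'], cdom([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])
  # returns True because the mean exponential loss of the first point
  # (~0.77) is smaller than that of the second (~1.31); swapping the
  # arguments returns False.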
def distant(lst):
R, C = np.shape(lst)
farthest=lambda one,rest: sorted(rest, key=lambda F: aDist(F,one))[-1]
one=lst[randi(0,R-1)]
mid=farthest(one, lst)
two=farthest(mid, lst)
return one, two
def mutate(lst,good,g=0.15):
new=[]
for l in lst:
new.append([a+(b-a)*g for a,b in zip(l,good)])
return new
def aDist(one, two):
return np.sqrt(np.sum((np.array(one)/norm-np.array(two)/norm)**2))
def recurse(dataset):
R, C = np.shape(dataset) # No. of Rows and Col
# Find the two most distance points.
one, two = distant(dataset)
# Project each case on
def proj(test):
a = aDist(one, test)
b = aDist(two, test)
c = aDist(one, two)
return (a**2-b**2+c**2)/(2*c)
if R<np.sqrt(N):
leaf.extend(dataset)
else:
half1 = cdom(model.solve(one), model.solve(two))
if half1:
_ = recurse(sorted(dataset,key=lambda F:proj(F))[:int(R/2)])
else:
_ = recurse(sorted(dataset,key=lambda F:proj(F))[int(R/2):])
recurse(frontier)
a,b=distant(leaf)
(good, bad) = (a,b) if cdom(model.solve(a), model.solve(b)) else (b,a)
new=mutate(leaf,good,g=0.5)
return new
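# Hedged usage sketch (argument values are arbitrary): a single GALE
# generation over a fresh random population, returning the mutated leaves:
#
#   new_pop = gale0(model=DTLZ2(n_dec=30, n_obj=3), new=[], pop=128)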
def gale1(iter=1000,pop=1600,model=DTLZ2(n_dec=30, n_obj=3)):
n_proc = int(1000.00/iter)
new = gale0(model,new=[],pop=int(pop/n_proc))
while iter:
iter-=1
new=gale0(model, new, pop=int(pop/n_proc))
return new
def gale2(pop):
model = DTLZ2(n_dec=30,n_obj=3)
# set_trace()
return gale0(new=model.generate(pop))
def GALE2(n_proc=10,frontSize=100,iters=1000,model=DTLZ2(n_dec=30, n_obj=3)):
"""
WHY do threads take more time than single processors?? FIX THIS!!!
  :param n_proc: number of worker processes to spawn
  :param frontSize: total initial population size, split evenly across workers
  :param iters: iteration budget (not used by this variant)
  :param model: the optimization problem (note: gale2 below hard-codes DTLZ2)
  :return: the leaf population from a final gale0 pass over the merged results
"""
t = time()
collect=[]
final = []
popSize = [int(frontSize/n_proc)]*n_proc
# initpop = [(model, model.generate(1000), 1000) for _ in xrange(n_proc)]
p=Pool(processes=n_proc)
collect.extend(p.map(gale2, popSize))
for cc in collect: final.extend(cc)
# set_trace()
ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
print('Time Taken: ', time()-t)
return ret
def GALE(n_proc=10,frontSize=100,iters=100):
t = time()
collect=[]
final = []
per = [iters/n_proc]*n_proc
popSize = [frontSize/n_proc]*n_proc
p=Pool(processes=n_proc)
collect.extend(p.map(gale1, per))
for cc in collect: final.extend(cc)
ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
print('Time Taken: ', time()-t)
# true = DTLZ2(n_dec=30, n_obj=3).get_pareto()
m = measure(model=DTLZ2(n_dec=30, n_obj=3))
conv = m.convergence(ret)
print("Convergence:",conv)
# set_trace()
return
if __name__=="__main__":
eval(cmd()) | mit | -8,693,513,753,031,806,000 | 26.673203 | 77 | 0.604063 | false |
campbe13/openhatch | vendor/packages/gdata/src/gdata/youtube/client.py | 96 | 9463 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the YouTube servers.
A quick and dirty port of the YouTube GDATA 1.0 Python client
libraries to version 2.0 of the GDATA library.
"""
# __author__ = '[email protected] (John Skidgel)'
import logging
import gdata.client
import gdata.youtube.data
import atom.data
import atom.http_core
# Constants
# -----------------------------------------------------------------------------
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
'top_rated', 'most_viewed','watch_on_mobile')
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_SERVER = 'gdata.youtube.com/feeds/api'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_VIDEO_FEED_URI = 'http://%s/videos' % YOUTUBE_SERVER
YOUTUBE_USER_FEED_URI = 'http://%s/users/' % YOUTUBE_SERVER
# Takes a youtube video ID.
YOUTUBE_CAPTION_FEED_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captions'
# Takes a youtube video ID and a caption track ID.
YOUTUBE_CAPTION_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captiondata/%s'
YOUTUBE_CAPTION_MIME_TYPE = 'application/vnd.youtube.timedtext; charset=UTF-8'
# Classes
# -----------------------------------------------------------------------------
class Error(Exception):
"""Base class for errors within the YouTube service."""
pass
class RequestError(Error):
"""Error class that is thrown in response to an invalid HTTP Request."""
pass
class YouTubeError(Error):
"""YouTube service specific error class."""
pass
class YouTubeClient(gdata.client.GDClient):
"""Client for the YouTube service.
Performs a partial list of Google Data YouTube API functions, such as
retrieving the videos feed for a user and the feed for a video.
YouTube Service requires authentication for any write, update or delete
actions.
"""
api_version = '2'
auth_service = YOUTUBE_SERVICE
auth_scopes = ['https://%s' % YOUTUBE_SERVER]
ssl = True
def get_videos(self, uri=YOUTUBE_VIDEO_FEED_URI, auth_token=None,
desired_class=gdata.youtube.data.VideoFeed,
**kwargs):
"""Retrieves a YouTube video feed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.get_feed(uri, auth_token=auth_token,
desired_class=desired_class,
**kwargs)
GetVideos = get_videos
def get_user_feed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos.
Either a uri or a username must be provided. This will retrieve list
of videos uploaded by specified user. The uri will be of format
"http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.get_feed(uri, desired_class=gdata.youtube.data.VideoFeed)
GetUserFeed = get_user_feed
def get_video_entry(self, uri=None, video_id=None,
auth_token=None, **kwargs):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
A YouTubeVideoFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the get_youtube_video_entry() method')
elif video_id and uri is None:
uri = '%s/%s' % (YOUTUBE_VIDEO_FEED_URI, video_id)
return self.get_feed(uri,
desired_class=gdata.youtube.data.VideoEntry,
auth_token=auth_token,
**kwargs)
GetVideoEntry = get_video_entry
def get_caption_feed(self, uri):
"""Retrieve a Caption feed of tracks.
Args:
uri: A string representing the caption feed's URI to be retrieved.
Returns:
A YouTube CaptionFeed if successfully retrieved.
"""
return self.get_feed(uri, desired_class=gdata.youtube.data.CaptionFeed)
GetCaptionFeed = get_caption_feed
def get_caption_track(self, track_url, client_id,
developer_key, auth_token=None, **kwargs):
http_request = atom.http_core.HttpRequest(uri = track_url, method = 'GET')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Authorization': authsub,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
return self.request(http_request=http_request, **kwargs)
GetCaptionTrack = get_caption_track
def create_track(self, video_id, title, language, body, client_id,
developer_key, auth_token=None, title_type='text', **kwargs):
"""Creates a closed-caption track and adds to an existing YouTube video.
"""
new_entry = gdata.youtube.data.TrackEntry(
content = gdata.youtube.data.TrackContent(text = body, lang = language))
uri = YOUTUBE_CAPTION_FEED_URI % video_id
http_request = atom.http_core.HttpRequest(uri = uri, method = 'POST')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Content-Type': YOUTUBE_CAPTION_MIME_TYPE,
'Content-Language': language,
'Slug': title,
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
http_request.add_body_part(body, http_request.headers['Content-Type'])
return self.request(http_request = http_request,
desired_class = new_entry.__class__, **kwargs)
CreateTrack = create_track
def delete_track(self, video_id, track, client_id, developer_key,
auth_token=None, **kwargs):
"""Deletes a track."""
if isinstance(track, gdata.youtube.data.TrackEntry):
track_id_text_node = track.get_id().split(':')
track_id = track_id_text_node[3]
else:
track_id = track
uri = YOUTUBE_CAPTION_URI % (video_id, track_id)
http_request = atom.http_core.HttpRequest(uri = uri, method = 'DELETE')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
return self.request(http_request=http_request, **kwargs)
DeleteTrack = delete_track
def update_track(self, video_id, track, body, client_id, developer_key,
auth_token=None, **kwargs):
"""Updates a closed-caption track for an existing YouTube video.
"""
track_id_text_node = track.get_id().split(':')
track_id = track_id_text_node[3]
uri = YOUTUBE_CAPTION_URI % (video_id, track_id)
http_request = atom.http_core.HttpRequest(uri = uri, method = 'PUT')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Content-Type': YOUTUBE_CAPTION_MIME_TYPE,
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
http_request.add_body_part(body, http_request.headers['Content-Type'])
return self.request(http_request = http_request,
desired_class = track.__class__, **kwargs)
UpdateTrack = update_track
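  # Hedged usage sketch (IDs, key and token below are placeholders, and the
  # caption body must be timed-text data; this is not an official example):
  #
  #   client = YouTubeClient()
  #   tracks = client.get_caption_feed(YOUTUBE_CAPTION_FEED_URI % 'VIDEO_ID')
  #   client.create_track('VIDEO_ID', 'English CC', 'en', caption_body,
  #                       'example-client-id', 'DEVELOPER_KEY',
  #                       auth_token=token)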
| agpl-3.0 | -6,060,362,857,601,215,000 | 34.709434 | 94 | 0.646095 | false |
wangjun/odoo | addons/sales_team/sales_team.py | 180 | 6131 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.osv):
_name = "crm.case.section"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Teams"
_order = "complete_name"
_period_number = 5
def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
'tootip': (str) bar_column_tooltip,
}
]
"""
month_begin = date.today().replace(day=1)
section_result = [{
'value': 0,
'tooltip': tools.ustr((month_begin + relativedelta.relativedelta(months=-i)).strftime('%B %Y')),
} for i in range(self._period_number - 1, -1, -1)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
return section_result
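    # Illustrative shape of the returned value (numbers invented): with
    # _period_number = 5 the sparkline widget gets one dict per month,
    # oldest first, e.g.
    #   [{'value': 0, 'tooltip': u'January 2015'}, ...,
    #    {'value': 3, 'tooltip': u'May 2015'}]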
_columns = {
'name': fields.char('Sales Team', size=64, required=True, translate=True),
'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
'code': fields.char('Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to "\
"false, it will allow you to hide the sales team without removing it."),
'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team override the salesman with the team leader."),
'user_id': fields.many2one('res.users', 'Team Leader'),
'member_ids': fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
'note': fields.text('Description'),
'working_hours': fields.float('Working Hours', digits=(16, 2)),
'color': fields.integer('Color Index'),
}
_defaults = {
'active': 1,
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
]
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
"""Overrides orm name_get method"""
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'default_section_id': fields.many2one('crm.case.section', 'Default Sales Team'),
}
def __init__(self, pool, cr):
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['default_section_id'])
return init_res
| agpl-3.0 | 1,121,648,430,971,460,000 | 46.161538 | 167 | 0.590442 | false |
StackPointCloud/libcloud | contrib/generate_provider_feature_matrix_table.py | 6 | 18754 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import inspect
from collections import OrderedDict
from os.path import join as pjoin
this_dir = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(this_dir, '../'))
from libcloud.compute.base import NodeDriver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.providers import DRIVERS as COMPUTE_DRIVERS
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.loadbalancer.base import Driver as LBDriver
from libcloud.loadbalancer.providers import get_driver as get_lb_driver
from libcloud.loadbalancer.providers import DRIVERS as LB_DRIVERS
from libcloud.loadbalancer.types import Provider as LBProvider
from libcloud.storage.base import StorageDriver
from libcloud.storage.providers import get_driver as get_storage_driver
from libcloud.storage.providers import DRIVERS as STORAGE_DRIVERS
from libcloud.storage.types import Provider as StorageProvider
from libcloud.dns.base import DNSDriver
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.providers import DRIVERS as DNS_DRIVERS
from libcloud.dns.types import Provider as DNSProvider
from libcloud.container.base import ContainerDriver
from libcloud.container.providers import get_driver as get_container_driver
from libcloud.container.providers import DRIVERS as CONTAINER_DRIVERS
from libcloud.container.types import Provider as ContainerProvider
from libcloud.backup.base import BackupDriver
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.backup.providers import DRIVERS as BACKUP_DRIVERS
from libcloud.backup.types import Provider as BackupProvider
HEADER = ('.. NOTE: This file has been generated automatically using '
'generate_provider_feature_matrix_table.py script, don\'t manually '
'edit it')
BASE_API_METHODS = {
'compute_main': ['list_nodes', 'create_node', 'reboot_node',
'destroy_node', 'list_images', 'list_sizes',
'deploy_node'],
'compute_image_management': ['list_images', 'get_image',
'create_image', 'delete_image', 'copy_image'],
'compute_block_storage': ['list_volumes', 'create_volume',
'destroy_volume',
'attach_volume', 'detach_volume',
'list_volume_snapshots',
'create_volume_snapshot'],
'compute_key_pair_management': ['list_key_pairs', 'get_key_pair',
'create_key_pair',
'import_key_pair_from_string',
'import_key_pair_from_file',
'delete_key_pair'],
'loadbalancer': ['create_balancer', 'list_balancers',
'balancer_list_members', 'balancer_attach_member',
'balancer_detach_member', 'balancer_attach_compute_node'],
'storage_main': ['list_containers', 'list_container_objects',
'iterate_containers', 'iterate_container_objects',
'create_container', 'delete_container', 'upload_object',
'upload_object_via_stream', 'download_object',
'download_object_as_stream', 'delete_object'],
'storage_cdn': ['enable_container_cdn', 'enable_object_cdn',
'get_container_cdn_url', 'get_object_cdn_url'],
'dns': ['list_zones', 'list_records', 'iterate_zones', 'iterate_records',
'create_zone', 'update_zone', 'create_record', 'update_record',
'delete_zone', 'delete_record'],
'container': ['install_image', 'list_images', 'deploy_container',
'get_container', 'start_container', 'stop_container',
'restart_container', 'destroy_container', 'list_containers',
'list_locations', 'create_cluster', 'destroy_cluster',
'list_clusters'],
'backup': ['get_supported_target_types', 'list_targets', 'create_target', 'create_target_from_node',
'create_target_from_storage_container', 'update_target', 'delete_target', 'list_recovery_points',
'recover_target', 'recover_target_out_of_place', 'list_target_jobs', 'create_target_job',
'resume_target_job', 'suspend_target_job', 'cancel_target_job']
}
FRIENDLY_METHODS_NAMES = {
'compute_main': {
'list_nodes': 'list nodes',
'create_node': 'create node',
'reboot_node': 'reboot node',
'destroy_node': 'destroy node',
'list_images': 'list images',
'list_sizes': 'list sizes',
'deploy_node': 'deploy node'
},
'compute_image_management': {
'list_images': 'list images',
'get_image': 'get image',
'create_image': 'create image',
'copy_image': 'copy image',
'delete_image': 'delete image'
},
'compute_block_storage': {
'list_volumes': 'list volumes',
'create_volume': 'create volume',
'destroy_volume': 'destroy volume',
'attach_volume': 'attach volume',
'detach_volume': 'detach volume',
'list_volume_snapshots': 'list snapshots',
'create_volume_snapshot': 'create snapshot'
},
'compute_key_pair_management': {
'list_key_pairs': 'list key pairs',
'get_key_pair': 'get key pair',
'create_key_pair': 'create key pair',
'import_key_pair_from_string': 'import public key from string',
'import_key_pair_from_file': 'import public key from file',
'delete_key_pair': 'delete key pair'
},
'loadbalancer': {
'create_balancer': 'create balancer',
'list_balancers': 'list balancers',
'balancer_list_members': 'list members',
'balancer_attach_member': 'attach member',
'balancer_detach_member': 'detach member',
'balancer_attach_compute_node': 'attach compute node'
},
'storage_main': {
'list_containers': 'list containers',
'list_container_objects': 'list objects',
'create_container': 'create container',
'delete_container': 'delete container',
'upload_object': 'upload object',
'upload_object_via_stream': 'streaming object upload',
'download_object': 'download object',
'download_object_as_stream': 'streaming object download',
'delete_object': 'delete object'
},
'storage_cdn': {
'enable_container_cdn': 'enable container cdn',
'enable_object_cdn': 'enable object cdn',
'get_container_cdn_url': 'get container cdn URL',
'get_object_cdn_url': 'get object cdn URL',
},
'dns': {
'list_zones': 'list zones',
'list_records': 'list records',
'create_zone': 'create zone',
'update_zone': 'update zone',
'create_record': 'create record',
'update_record': 'update record',
'delete_zone': 'delete zone',
'delete_record': 'delete record'
},
'container': {
'install_image': 'install image',
'list_images': 'list images',
'deploy_container': 'deploy container',
'get_container': 'get container',
'list_containers': 'list containers',
'start_container': 'start container',
'stop_container': 'stop container',
'restart_container': 'restart container',
'destroy_container': 'destroy container',
'list_locations': 'list locations',
'create_cluster': 'create cluster',
'destroy_cluster': 'destroy cluster',
'list_clusters': 'list clusters'
},
'backup': {
'get_supported_target_types': 'get supported target types',
'list_targets': 'list targets',
'create_target': 'create target',
'create_target_from_node': 'create target from node',
'create_target_from_storage_container': 'create target from storage container',
'update_target': 'update target',
'delete_target': 'delete target',
'list_recovery_points': 'list recovery points',
'recover_target': 'recover target',
'recover_target_out_of_place': 'recover target out of place',
'list_target_jobs': 'list target jobs',
'create_target_job': 'create target job',
'resume_target_job': 'resume target job',
'suspend_target_job': 'suspend target job',
'cancel_target_job': 'cancel target job'
}
}
IGNORED_PROVIDERS = [
'dummy',
# Deprecated constants
'cloudsigma_us',
'cloudfiles_swift'
]
def get_provider_api_names(Provider):
names = [key for key, value in Provider.__dict__.items() if
not key.startswith('__')]
return names
def generate_providers_table(api):
result = {}
if api in ['compute_main', 'compute_image_management',
'compute_block_storage', 'compute_key_pair_management']:
driver = NodeDriver
drivers = COMPUTE_DRIVERS
provider = ComputeProvider
get_driver_method = get_compute_driver
elif api == 'loadbalancer':
driver = LBDriver
drivers = LB_DRIVERS
provider = LBProvider
get_driver_method = get_lb_driver
elif api in ['storage_main', 'storage_cdn']:
driver = StorageDriver
drivers = STORAGE_DRIVERS
provider = StorageProvider
get_driver_method = get_storage_driver
elif api == 'dns':
driver = DNSDriver
drivers = DNS_DRIVERS
provider = DNSProvider
get_driver_method = get_dns_driver
elif api == 'container':
driver = ContainerDriver
drivers = CONTAINER_DRIVERS
provider = ContainerProvider
get_driver_method = get_container_driver
elif api == 'backup':
driver = BackupDriver
drivers = BACKUP_DRIVERS
provider = BackupProvider
get_driver_method = get_backup_driver
else:
raise Exception('Invalid api: %s' % (api))
names = get_provider_api_names(provider)
result = OrderedDict()
for name in names:
enum = getattr(provider, name)
try:
cls = get_driver_method(enum)
except Exception as e:
# Deprecated providers throw an exception
print('Ignoring deprecated constant "%s": %s' % (enum, str(e)))
continue
# Hack for providers which expose multiple classes and support multiple
# API versions
# TODO: Make entry per version
if name.lower() == 'cloudsigma':
from libcloud.compute.drivers.cloudsigma import \
CloudSigma_2_0_NodeDriver
cls = CloudSigma_2_0_NodeDriver
elif name.lower() == 'opennebula':
from libcloud.compute.drivers.opennebula import \
OpenNebula_3_8_NodeDriver
cls = OpenNebula_3_8_NodeDriver
elif name.lower() == 'digital_ocean' and api.startswith('compute'):
from libcloud.compute.drivers.digitalocean import \
DigitalOcean_v2_NodeDriver
cls = DigitalOcean_v2_NodeDriver
if name.lower() in IGNORED_PROVIDERS:
continue
driver_methods = dict(inspect.getmembers(cls,
predicate=inspect.isfunction))
base_methods = dict(inspect.getmembers(driver,
predicate=inspect.isfunction))
base_api_methods = BASE_API_METHODS[api]
result[name] = {'name': cls.name, 'website': cls.website,
'constant': name, 'module': drivers[enum][0],
'class': drivers[enum][1],
'cls': cls,
'methods': {}}
for method_name in base_api_methods:
base_method = base_methods[method_name]
driver_method = driver_methods[method_name]
if method_name == 'deploy_node':
features = getattr(cls, 'features', {}).get('create_node', [])
is_implemented = len(features) >= 1
else:
is_implemented = (id(driver_method) !=
id(base_method))
result[name]['methods'][method_name] = is_implemented
return result
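# Illustrative shape of the mapping built above (entries approximate, not
# generated output):
#
#   result['EC2'] = {'name': 'Amazon EC2', 'website': 'http://aws.amazon.com/ec2/',
#                    'constant': 'EC2', 'module': 'libcloud.compute.drivers.ec2',
#                    'class': 'EC2NodeDriver', 'cls': <driver class>,
#                    'methods': {'create_node': True, ...}}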
def generate_rst_table(data):
cols = len(data[0])
col_len = [max(len(r[i]) for r in data) for i in range(cols)]
formatter = ' '.join('{:<%d}' % c for c in col_len)
header = formatter.format(*['=' * c for c in col_len])
rows = [formatter.format(*row) for row in data]
result = header + '\n' + rows[0] + '\n' + header + '\n' +\
'\n'.join(rows[1:]) + '\n' + header
return result
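# Illustrative example (not part of the original script): for
# data = [['Provider', 'create node'], ['`Dummy`_', 'yes']] this returns a
# small reStructuredText simple table:
#
#   ======== ===========
#   Provider create node
#   ======== ===========
#   `Dummy`_ yes
#   ======== ===========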
def generate_supported_methods_table(api, provider_matrix):
base_api_methods = BASE_API_METHODS[api]
data = []
header = [FRIENDLY_METHODS_NAMES[api][method_name] for method_name in
base_api_methods if not method_name.startswith('iterate_')]
data.append(['Provider'] + header)
for provider, values in sorted(provider_matrix.items()):
provider_name = '`%s`_' % (values['name'])
row = [provider_name]
# TODO: Make it nicer
# list_* methods don't need to be implemented if iterate_* methods are
# implemented
if api == 'storage_main':
if values['methods']['iterate_containers']:
values['methods']['list_containers'] = True
if values['methods']['iterate_container_objects']:
values['methods']['list_container_objects'] = True
elif api == 'dns':
# list_zones and list_records don't need to be implemented if
if values['methods']['iterate_zones']:
values['methods']['list_zones'] = True
if values['methods']['iterate_records']:
values['methods']['list_records'] = True
for method in base_api_methods:
# TODO: ghetto
if method.startswith('iterate_'):
continue
supported = values['methods'][method]
if supported:
row.append('yes')
else:
row.append('no')
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_supported_providers_table(api, provider_matrix):
data = []
header = ['Provider', 'Documentation', 'Provider Constant',
'Supported Regions', 'Module', 'Class Name']
data.append(header)
for provider, values in sorted(provider_matrix.items()):
name_str = '`%s`_' % (values['name'])
module_str = ':mod:`%s`' % (values['module'])
class_str = ':class:`%s`' % (values['class'])
params = {'api': api, 'provider': provider.lower()}
driver_docs_path = pjoin(this_dir,
'../docs/%(api)s/drivers/%(provider)s.rst'
% params)
if os.path.exists(driver_docs_path):
docs_link = ':doc:`Click </%(api)s/drivers/%(provider)s>`' % params
else:
docs_link = ''
cls = values['cls']
supported_regions = cls.list_regions() if hasattr(cls, 'list_regions') \
else None
if supported_regions:
# Sort the regions to achieve stable output
supported_regions = sorted(supported_regions)
supported_regions = ', '.join(supported_regions)
else:
supported_regions = 'single region driver'
row = [name_str, docs_link, values['constant'], supported_regions,
module_str, class_str]
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_tables():
apis = BASE_API_METHODS.keys()
for api in apis:
result = generate_providers_table(api)
docs_dir = api
if api.startswith('compute'):
docs_dir = 'compute'
elif api.startswith('storage'):
docs_dir = 'storage'
supported_providers = generate_supported_providers_table(docs_dir,
result)
supported_methods = generate_supported_methods_table(api, result)
current_path = os.path.dirname(__file__)
target_dir = os.path.abspath(pjoin(current_path,
'../docs/%s/' % (docs_dir)))
file_name_1 = '_supported_providers.rst'
file_name_2 = '_supported_methods.rst'
if api == 'compute_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'compute_image_management':
file_name_2 = '_supported_methods_image_management.rst'
elif api == 'compute_block_storage':
file_name_2 = '_supported_methods_block_storage.rst'
elif api == 'compute_key_pair_management':
file_name_2 = '_supported_methods_key_pair_management.rst'
elif api == 'storage_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'storage_cdn':
file_name_2 = '_supported_methods_cdn.rst'
supported_providers_path = pjoin(target_dir, file_name_1)
supported_methods_path = pjoin(target_dir, file_name_2)
with open(supported_providers_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_providers)
with open(supported_methods_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_methods)
generate_tables()
| apache-2.0 | 632,955,592,271,907,100 | 38.81741 | 112 | 0.595713 | false |
odoo-brazil/l10n-brazil-wip | l10n_br_account_product/__manifest__.py | 3 | 2375 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localization Account Product',
'summary': "Brazilian Localization Account Product",
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.3.0.0',
'depends': [
'l10n_br_data_account',
'account_product_fiscal_classification',
],
'data': [
'l10n_br_account_product_sequence.xml',
'account_invoice_workflow.xml',
'data/l10n_br_account_product.cfop.csv',
'data/l10n_br_account.fiscal.document.csv',
'data/l10n_br_account_data.xml',
'data/l10n_br_account_product_data.xml',
'data/l10n_br_tax.icms_partition.csv',
'data/ir_cron.xml',
'views/l10n_br_account_product_view.xml',
'views/l10n_br_account_view.xml',
'views/l10n_br_account_product_view.xml',
'views/account_view.xml',
'views/account_invoice_view.xml',
'wizard/l10n_br_account_invoice_costs_ratio_view.xml',
'views/nfe/account_invoice_nfe_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/account_product_fiscal_classification_view.xml',
'views/product_view.xml',
'views/res_country_view.xml',
'wizard/l10n_br_account_nfe_export_invoice_view.xml',
'wizard/l10n_br_account_nfe_export_view.xml',
'wizard/l10n_br_account_document_status_sefaz_view.xml',
'wizard/account_invoice_refund_view.xml',
'security/l10n_br_account_product_security.xml',
'security/ir.model.access.csv',
'report/account_invoice_report_view.xml',
],
'demo': [
'demo/account_tax_code_demo.xml',
'demo/account_tax_demo.xml',
'demo/base_demo.xml',
'demo/product_demo.xml',
'demo/l10n_br_account_product_demo.xml',
'demo/account_fiscal_position_rule_demo.xml',
'demo/product_taxes.yml',
],
'test': [
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_invoice_refund.yml',
'test/nfe_export.yml',
],
'installable': False,
'auto_install': False,
}
| agpl-3.0 | 522,042,916,848,649,000 | 36.698413 | 64 | 0.612632 | false |
msingh172/youtube-dl | youtube_dl/extractor/screencastomatic.py | 149 | 1713 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
js_to_json,
)
class ScreencastOMaticIE(InfoExtractor):
_VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P<id>[0-9a-zA-Z]+)'
_TEST = {
'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl',
'md5': '483583cb80d92588f15ccbedd90f0c18',
'info_dict': {
'id': 'c2lD3BeOPl',
'ext': 'mp4',
'title': 'Welcome to 3-4 Philosophy @ DECV!',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
setup_js = self._search_regex(
r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);",
webpage, 'setup code')
data = self._parse_json(setup_js, video_id, transform_source=js_to_json)
try:
video_data = next(
m for m in data['modes'] if m.get('type') == 'html5')
except StopIteration:
raise ExtractorError('Could not find any video entries!')
video_url = compat_urlparse.urljoin(url, video_data['config']['file'])
thumbnail = data.get('image')
return {
'id': video_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'url': video_url,
'ext': 'mp4',
'thumbnail': thumbnail,
}
| unlicense | 5,264,428,978,670,562,000 | 33.959184 | 119 | 0.556334 | false |
domeger/SplunkTAforPuppetEnterprise | bin/splunktaforpuppetenterprise/solnlib/packages/splunklib/searchcommands/validators.py | 16 | 11478 | # coding=utf-8
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
from cStringIO import StringIO
from io import open
import csv
import os
import re
class Validator(object):
""" Base class for validators that check and format search command options.
You must inherit from this class and override :code:`Validator.__call__` and
:code:`Validator.format`. :code:`Validator.__call__` should convert the
value it receives as argument and then return it or raise a
:code:`ValueError`, if the value will not convert.
:code:`Validator.format` should return a human readable version of the value
it receives as argument the same way :code:`str` does.
"""
def __call__(self, value):
raise NotImplementedError()
def format(self, value):
raise NotImplementedError()
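# A minimal concrete validator following the contract described above might
# look like the sketch below (illustrative only; the name and behaviour are
# not part of this module):
#
#     class Percentage(Validator):
#         def __call__(self, value):
#             if value is None:
#                 return None
#             value = float(value)
#             if not 0.0 <= value <= 100.0:
#                 raise ValueError('Expected a value in [0, 100], not {0}'.format(value))
#             return value
#         def format(self, value):
#             return None if value is None else unicode(value)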
class Boolean(Validator):
""" Validates Boolean option values.
"""
truth_values = {
'1': True, '0': False,
't': True, 'f': False,
'true': True, 'false': False,
'y': True, 'n': False,
'yes': True, 'no': False
}
def __call__(self, value):
if not (value is None or isinstance(value, bool)):
value = unicode(value).lower()
if value not in Boolean.truth_values:
raise ValueError('Unrecognized truth value: {0}'.format(value))
value = Boolean.truth_values[value]
return value
def format(self, value):
return None if value is None else 't' if value else 'f'
class Code(Validator):
""" Validates code option values.
This validator compiles an option value into a Python code object that can be executed by :func:`exec` or evaluated
by :func:`eval`. The value returned is a :func:`namedtuple` with two members: object, the result of compilation, and
source, the original option value.
"""
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
consists of a single interactive statement. In the latter case, expression statements that evaluate to
something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
self._mode = mode
def __call__(self, value):
if value is None:
return None
try:
return Code.object(compile(value, 'string', self._mode), unicode(value))
except (SyntaxError, TypeError) as error:
raise ValueError(error.message)
def format(self, value):
return None if value is None else value.source
object = namedtuple(b'Code', (b'object', 'source'))
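# Illustrative usage of the Code validator above (the example expression is an
# assumption for demonstration, not taken from the Splunk SDK documentation):
#
#     validate = Code(mode='eval')
#     option = validate('1 + 1')    # compile the expression
#     eval(option.object)           # -> 2
#     option.source                 # -> u'1 + 1'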
class Fieldname(Validator):
""" Validates field name option values.
"""
pattern = re.compile(r'''[_.a-zA-Z-][_.a-zA-Z0-9-]*$''')
def __call__(self, value):
if value is not None:
value = unicode(value)
if Fieldname.pattern.match(value) is None:
raise ValueError('Illegal characters in fieldname: {}'.format(value))
return value
def format(self, value):
return value
class File(Validator):
""" Validates file option values.
"""
def __init__(self, mode='rt', buffering=None, directory=None):
self.mode = mode
self.buffering = buffering
self.directory = File._var_run_splunk if directory is None else directory
def __call__(self, value):
if value is None:
return value
path = unicode(value)
if not os.path.isabs(path):
path = os.path.join(self.directory, path)
try:
value = open(path, self.mode) if self.buffering is None else open(path, self.mode, self.buffering)
except IOError as error:
raise ValueError('Cannot open {0} with mode={1} and buffering={2}: {3}'.format(
value, self.mode, self.buffering, error))
return value
def format(self, value):
return None if value is None else value.name
_var_run_splunk = os.path.join(
os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else os.getcwdu(), 'var', 'run', 'splunk')
class Integer(Validator):
""" Validates integer option values.
"""
def __init__(self, minimum=None, maximum=None):
if minimum is not None and maximum is not None:
def check_range(value):
if not (minimum <= value <= maximum):
raise ValueError('Expected integer in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
return
elif minimum is not None:
def check_range(value):
if value < minimum:
raise ValueError('Expected integer in the range [{0},+∞], not {1}'.format(minimum, value))
return
elif maximum is not None:
def check_range(value):
if value > maximum:
raise ValueError('Expected integer in the range [-∞,{0}], not {1}'.format(maximum, value))
return
else:
def check_range(value):
return
self.check_range = check_range
return
def __call__(self, value):
if value is None:
return None
try:
value = long(value)
except ValueError:
raise ValueError('Expected integer value, not {}'.format(json_encode_string(value)))
self.check_range(value)
return value
def format(self, value):
return None if value is None else unicode(long(value))
class Duration(Validator):
""" Validates duration option values.
"""
def __call__(self, value):
if value is None:
return None
p = value.split(':', 2)
result = None
_60 = Duration._60
_unsigned = Duration._unsigned
try:
if len(p) == 1:
result = _unsigned(p[0])
if len(p) == 2:
result = 60 * _unsigned(p[0]) + _60(p[1])
if len(p) == 3:
result = 3600 * _unsigned(p[0]) + 60 * _60(p[1]) + _60(p[2])
except ValueError:
raise ValueError('Invalid duration value: {0}'.format(value))
return result
def format(self, value):
if value is None:
return None
value = int(value)
s = value % 60
m = value // 60 % 60
h = value // (60 * 60)
return '{0:02d}:{1:02d}:{2:02d}'.format(h, m, s)
_60 = Integer(0, 59)
_unsigned = Integer(0)
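# Illustrative behaviour of the Duration validator above (values chosen purely
# for demonstration): it accepts 'SS', 'MM:SS' or 'HH:MM:SS' strings, so
# Duration()('01:02:03') returns 3723 seconds and Duration().format(3723)
# renders it back as '01:02:03'.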
class List(Validator):
""" Validates a list of strings
"""
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
delimiter = b','
quotechar = b'"'
doublequote = True
lineterminator = b'\n'
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
def __init__(self, validator=None):
if not (validator is None or isinstance(validator, Validator)):
            raise ValueError('Expected a Validator instance or None for validator, not {}'.format(repr(validator)))
self._validator = validator
def __call__(self, value):
if value is None or isinstance(value, list):
return value
try:
value = csv.reader([value], self.Dialect).next()
except csv.Error as error:
raise ValueError(error)
if self._validator is None:
return value
try:
for index, item in enumerate(value):
value[index] = self._validator(item)
except ValueError as error:
raise ValueError('Could not convert item {}: {}'.format(index, error))
return value
def format(self, value):
output = StringIO()
writer = csv.writer(output, List.Dialect)
writer.writerow(value)
value = output.getvalue()
return value[:-1]
class Map(Validator):
""" Validates map option values.
"""
def __init__(self, **kwargs):
self.membership = kwargs
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {0}'.format(value))
return self.membership[value]
def format(self, value):
return None if value is None else self.membership.keys()[self.membership.values().index(value)]
class Match(Validator):
""" Validates that a value matches a regular expression pattern.
"""
def __init__(self, name, pattern, flags=0):
self.name = unicode(name)
self.pattern = re.compile(pattern, flags)
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if self.pattern.match(value) is None:
raise ValueError('Expected {}, not {}'.format(self.name, json_encode_string(value)))
return value
def format(self, value):
return None if value is None else unicode(value)
class OptionName(Validator):
""" Validates option names.
"""
pattern = re.compile(r'''(?=\w)[^\d]\w*$''', re.UNICODE)
def __call__(self, value):
if value is not None:
value = unicode(value)
if OptionName.pattern.match(value) is None:
raise ValueError('Illegal characters in option name: {}'.format(value))
return value
def format(self, value):
return None if value is None else unicode(value)
class RegularExpression(Validator):
""" Validates regular expression option values.
"""
def __call__(self, value):
if value is None:
return None
try:
value = re.compile(unicode(value))
except re.error as error:
raise ValueError('{}: {}'.format(unicode(error).capitalize(), value))
return value
def format(self, value):
return None if value is None else value.pattern
class Set(Validator):
""" Validates set option values.
"""
def __init__(self, *args):
self.membership = set(args)
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {}'.format(value))
return value
def format(self, value):
return self.__call__(value)
__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
| apache-2.0 | -1,010,238,125,554,260,400 | 28.880208 | 120 | 0.592644 | false |
wsmith323/django | django/contrib/sites/management.py | 467 | 1564 | """
Creates the default Site object.
"""
from django.apps import apps
from django.conf import settings
from django.core.management.color import no_style
from django.db import DEFAULT_DB_ALIAS, connections, router
def create_default_site(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
try:
Site = apps.get_model('sites', 'Site')
except LookupError:
return
if not router.allow_migrate_model(using, Site):
return
if not Site.objects.using(using).exists():
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=getattr(settings, 'SITE_ID', 1), domain="example.com", name="example.com").save(using=using)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[using].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
with connections[using].cursor() as cursor:
for command in sequence_sql:
cursor.execute(command)
| bsd-3-clause | -8,553,015,671,072,065,000 | 40.157895 | 108 | 0.646419 | false |
linktlh/Toontown-journey | toontown/shtiker/ShtikerBook.py | 1 | 19967 | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from pandac.PandaModules import *
from toontown.effects import DistributedFireworkShow
from toontown.nametag import NametagGlobals
from toontown.parties import DistributedPartyFireworksActivity
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
class ShtikerBook(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('ShtikerBook')
def __init__(self, doneEvent):
DirectFrame.__init__(self, relief=None, sortOrder=DGG.BACKGROUND_SORT_INDEX)
self.initialiseoptions(ShtikerBook)
StateData.StateData.__init__(self, doneEvent)
self.pages = []
self.pageTabs = []
self.currPageTabIndex = None
self.pageTabFrame = DirectFrame(parent=self, relief=None, pos=(0.93, 1, 0.575), scale=1.25)
self.pageTabFrame.hide()
self.currPageIndex = None
self.pageBeforeNews = None
self.tempLeft = None
self.tempRight = None
self.entered = 0
self.safeMode = 0
self.__obscured = 0
self.__shown = 0
self.__isOpen = 0
self.hide()
self.setPos(0, 0, 0.1)
self.pageOrder = [TTLocalizer.OptionsPageTitle,
TTLocalizer.ShardPageTitle,
TTLocalizer.MapPageTitle,
TTLocalizer.InventoryPageTitle,
TTLocalizer.QuestPageToonTasks,
TTLocalizer.TrackPageShortTitle,
TTLocalizer.SuitPageTitle,
TTLocalizer.FishPageTitle,
TTLocalizer.KartPageTitle,
TTLocalizer.DisguisePageTitle,
TTLocalizer.NPCFriendPageTitle,
TTLocalizer.GardenPageTitle,
TTLocalizer.GolfPageTitle,
TTLocalizer.EventsPageName,
TTLocalizer.AchievementsPageTitle,
TTLocalizer.NewsPageName]
return
def setSafeMode(self, setting):
self.safeMode = setting
def enter(self):
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Open')
if self.entered:
return
self.entered = 1
messenger.send('releaseDirector')
messenger.send('stickerBookEntered')
base.playSfx(self.openSound)
base.disableMouse()
base.render.hide()
base.setBackgroundColor(0.05, 0.15, 0.4)
base.setCellsActive([base.rightCells[0]], 0)
NametagGlobals.setForce2dNametags(True)
NametagGlobals.setForceOnscreenChat(True)
self.__isOpen = 1
self.__setButtonVisibility()
self.show()
self.showPageArrows()
self.tempLeft = 'arrow_left'
self.tempRight = 'arrow_right'
if not self.safeMode:
self.accept('shtiker-page-done', self.__pageDone)
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
self.accept('disable-hotkeys', self.__disableHotkeys)
self.accept('enable-hotkeys', self.__enableHotkeys)
self.pageTabFrame.show()
self.pages[self.currPageIndex].enter()
if hasattr(localAvatar, 'newsButtonMgr') and localAvatar.newsButtonMgr:
localAvatar.newsButtonMgr.hideNewIssueButton()
def exit(self):
if not self.entered:
return
self.entered = 0
messenger.send('stickerBookExited')
base.playSfx(self.closeSound)
self.pages[self.currPageIndex].exit()
base.render.show()
setBlackBackground = 0
for obj in base.cr.doId2do.values():
if isinstance(obj, DistributedFireworkShow.DistributedFireworkShow) or isinstance(obj, DistributedPartyFireworksActivity.DistributedPartyFireworksActivity):
setBlackBackground = 1
if setBlackBackground:
base.setBackgroundColor(Vec4(0, 0, 0, 1))
else:
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
gsg = base.win.getGsg()
if gsg:
base.render.prepareScene(gsg)
base.setCellsActive([base.rightCells[0]], 1)
NametagGlobals.setForce2dNametags(False)
NametagGlobals.setForceOnscreenChat(False)
self.__isOpen = 0
self.hide()
self.hideButton()
cleanupDialog('globalDialog')
self.pageTabFrame.hide()
self.ignore('shtiker-page-done')
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
self.ignore(self.tempRight)
self.ignore(self.tempLeft)
self.ignore('disable-hotkeys')
self.ignore('enable-hotkeys')
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Close')
def load(self):
self.checkGardenStarted = localAvatar.getGardenStarted()
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
self['image'] = bookModel.find('**/big_book')
self['image_scale'] = (2, 1, 1.5)
self.resetFrameSize()
self.bookOpenButton = DirectButton(image=(bookModel.find('**/BookIcon_CLSD'), bookModel.find('**/BookIcon_OPEN'), bookModel.find('**/BookIcon_RLVR')), relief=None, pos=(-0.158, 0, 0.17), parent=base.a2dBottomRight, scale=0.305, command=self.__open)
self.bookCloseButton = DirectButton(image=(bookModel.find('**/BookIcon_OPEN'), bookModel.find('**/BookIcon_CLSD'), bookModel.find('**/BookIcon_RLVR2')), relief=None, pos=(-0.158, 0, 0.17), parent=base.a2dBottomRight, scale=0.305, command=self.__close)
self.bookOpenButton.hide()
self.bookCloseButton.hide()
self.nextArrow = DirectButton(parent=self, relief=None, image=(bookModel.find('**/arrow_button'), bookModel.find('**/arrow_down'), bookModel.find('**/arrow_rollover')), scale=(0.1, 0.1, 0.1), pos=(0.838, 0, -0.661), command=self.__pageChange, extraArgs=[1])
self.prevArrow = DirectButton(parent=self, relief=None, image=(bookModel.find('**/arrow_button'), bookModel.find('**/arrow_down'), bookModel.find('**/arrow_rollover')), scale=(-0.1, 0.1, 0.1), pos=(-0.838, 0, -0.661), command=self.__pageChange, extraArgs=[-1])
bookModel.removeNode()
self.openSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_open.ogg')
self.closeSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_delete.ogg')
self.pageSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_turn.ogg')
return
def unload(self):
loader.unloadModel('phase_3.5/models/gui/stickerbook_gui')
self.destroy()
self.bookOpenButton.destroy()
del self.bookOpenButton
self.bookCloseButton.destroy()
del self.bookCloseButton
self.nextArrow.destroy()
del self.nextArrow
self.prevArrow.destroy()
del self.prevArrow
for page in self.pages:
page.unload()
del self.pages
for pageTab in self.pageTabs:
pageTab.destroy()
del self.pageTabs
del self.currPageTabIndex
del self.openSound
del self.closeSound
del self.pageSound
del self.tempLeft
del self.tempRight
def addPage(self, page, pageName = 'Page'):
if pageName not in self.pageOrder:
            self.notify.error('Trying to add page %s to the ShtickerBook. Page not listed in the order.' % pageName)
return
pageIndex = 0
if len(self.pages):
newIndex = len(self.pages)
prevIndex = newIndex - 1
if self.pages[prevIndex].pageName == TTLocalizer.NewsPageName:
self.pages.insert(prevIndex, page)
pageIndex = prevIndex
if self.currPageIndex >= pageIndex:
self.currPageIndex += 1
else:
self.pages.append(page)
pageIndex = len(self.pages) - 1
else:
self.pages.append(page)
pageIndex = len(self.pages) - 1
page.setBook(self)
page.setPageName(pageName)
page.reparentTo(self)
self.addPageTab(page, pageIndex, pageName)
from toontown.shtiker import MapPage
if isinstance(page, MapPage.MapPage):
self.pageBeforeNews = page
def addPageTab(self, page, pageIndex, pageName = 'Page'):
tabIndex = len(self.pageTabs)
def goToPage():
messenger.send('wakeup')
base.playSfx(self.pageSound)
self.setPage(page)
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Browse tabs %s' % page.pageName)
localAvatar.newsButtonMgr.setGoingToNewsPageFromStickerBook(False)
localAvatar.newsButtonMgr.showAppropriateButton()
yOffset = 0.07 * pageIndex
iconGeom = None
iconImage = None
iconScale = 1
iconColor = Vec4(1)
buttonPressedCommand = goToPage
extraArgs = []
if pageName == TTLocalizer.OptionsPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/switch')
iconModels.detachNode()
elif pageName == TTLocalizer.ShardPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/district')
iconModels.detachNode()
elif pageName == TTLocalizer.MapPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/teleportIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.InventoryPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/inventory_icons')
iconGeom = iconModels.find('**/inventory_tart')
iconScale = 7
iconModels.detachNode()
elif pageName == TTLocalizer.QuestPageToonTasks:
iconModels = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
iconGeom = iconModels.find('**/questCard')
iconScale = 0.9
iconModels.detachNode()
elif pageName == TTLocalizer.TrackPageShortTitle:
iconGeom = iconModels = loader.loadModel('phase_3.5/models/gui/filmstrip')
iconScale = 1.1
iconColor = Vec4(0.7, 0.7, 0.7, 1)
iconModels.detachNode()
elif pageName == TTLocalizer.SuitPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/gui_gear')
iconModels.detachNode()
elif pageName == TTLocalizer.FishPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/fish')
iconModels.detachNode()
elif pageName == TTLocalizer.GardenPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/gardenIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.DisguisePageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/disguise2')
iconColor = Vec4(0.7, 0.7, 0.7, 1)
iconModels.detachNode()
elif pageName == TTLocalizer.NPCFriendPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/playingCard')
iconImage = iconModels.find('**/card_back')
iconGeom = iconModels.find('**/logo')
iconScale = 0.22
iconModels.detachNode()
elif pageName == TTLocalizer.KartPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/kartIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.GolfPageTitle:
iconModels = loader.loadModel('phase_6/models/golf/golf_gui')
iconGeom = iconModels.find('**/score_card_icon')
iconModels.detachNode()
elif pageName == TTLocalizer.EventsPageName:
iconModels = loader.loadModel('phase_4/models/parties/partyStickerbook')
iconGeom = iconModels.find('**/Stickerbook_PartyIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.PhotoPageTitle:
iconGeom = iconModels = loader.loadModel('phase_4/models/minigames/photogame_filmroll')
iconScale = (1.9, 1.5, 1.5)
iconModels.detachNode()
elif pageName == TTLocalizer.NewsPageName:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/tt_t_gui_sbk_newsPageTab')
iconModels.detachNode()
buttonPressedCommand = self.goToNewsPage
extraArgs = [page]
if pageName == TTLocalizer.OptionsPageTitle:
pageName = TTLocalizer.OptionsTabTitle
pageTab = DirectButton(parent=self.pageTabFrame, relief=DGG.RAISED, frameSize=(-0.575,
0.575,
-0.575,
0.575), borderWidth=(0.05, 0.05), text=('',
'',
pageName,
''), text_align=TextNode.ALeft, text_pos=(1, -0.2), text_scale=TTLocalizer.SBpageTab, text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), image=iconImage, image_scale=iconScale, geom=iconGeom, geom_scale=iconScale, geom_color=iconColor, pos=(0, 0, -yOffset), scale=0.06, command=buttonPressedCommand, extraArgs=extraArgs)
self.pageTabs.insert(pageIndex, pageTab)
return
def setPage(self, page, enterPage = True):
if self.currPageIndex is not None:
self.pages[self.currPageIndex].exit()
self.currPageIndex = self.pages.index(page)
self.setPageTabIndex(self.currPageIndex)
if enterPage:
self.showPageArrows()
page.enter()
from toontown.shtiker import NewsPage
if not isinstance(page, NewsPage.NewsPage):
self.pageBeforeNews = page
return
def setPageBeforeNews(self, enterPage = True):
self.setPage(self.pageBeforeNews, enterPage)
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
def setPageTabIndex(self, pageTabIndex):
if self.currPageTabIndex is not None and pageTabIndex != self.currPageTabIndex:
self.pageTabs[self.currPageTabIndex]['relief'] = DGG.RAISED
self.currPageTabIndex = pageTabIndex
self.pageTabs[self.currPageTabIndex]['relief'] = DGG.SUNKEN
return
def isOnPage(self, page):
result = False
if self.currPageIndex is not None:
curPage = self.pages[self.currPageIndex]
if curPage == page:
result = True
return result
def obscureButton(self, obscured):
self.__obscured = obscured
self.__setButtonVisibility()
def isObscured(self):
return self.__obscured
def showButton(self):
self.__shown = 1
self.__setButtonVisibility()
localAvatar.newsButtonMgr.showAppropriateButton()
def hideButton(self):
self.__shown = 0
self.__setButtonVisibility()
localAvatar.newsButtonMgr.request('Hidden')
def __setButtonVisibility(self):
if self.__isOpen:
self.bookOpenButton.hide()
self.bookCloseButton.show()
elif self.__shown and not self.__obscured:
self.bookOpenButton.show()
self.bookCloseButton.hide()
else:
self.bookOpenButton.hide()
self.bookCloseButton.hide()
def shouldBookButtonBeHidden(self):
result = False
if self.__isOpen:
pass
elif self.__shown and not self.__obscured:
pass
else:
result = True
return result
def __open(self):
messenger.send('enterStickerBook')
if not localAvatar.getGardenStarted():
for tab in self.pageTabs:
if tab['text'][2] == TTLocalizer.GardenPageTitle:
tab.hide()
def __close(self):
base.playSfx(self.closeSound)
self.doneStatus = {'mode': 'close'}
messenger.send('exitStickerBook')
messenger.send(self.doneEvent)
def closeBook(self):
self.__close()
def __pageDone(self):
page = self.pages[self.currPageIndex]
pageDoneStatus = page.getDoneStatus()
if pageDoneStatus:
if pageDoneStatus['mode'] == 'close':
self.__close()
else:
self.doneStatus = pageDoneStatus
messenger.send(self.doneEvent)
def __pageChange(self, offset):
messenger.send('wakeup')
base.playSfx(self.pageSound)
self.pages[self.currPageIndex].exit()
self.currPageIndex = self.currPageIndex + offset
messenger.send('stickerBookPageChange-' + str(self.currPageIndex))
self.currPageIndex = max(self.currPageIndex, 0)
self.currPageIndex = min(self.currPageIndex, len(self.pages) - 1)
self.setPageTabIndex(self.currPageIndex)
self.showPageArrows()
page = self.pages[self.currPageIndex]
from toontown.shtiker import NewsPage
if isinstance(page, NewsPage.NewsPage):
self.goToNewsPage(page)
else:
page.enter()
self.pageBeforeNews = page
def showPageArrows(self):
if self.currPageIndex == len(self.pages) - 1:
self.prevArrow.show()
self.nextArrow.hide()
else:
self.prevArrow.show()
self.nextArrow.show()
self.__checkForNewsPage()
if self.currPageIndex == 0:
self.prevArrow.hide()
self.nextArrow.show()
def __checkForNewsPage(self):
from toontown.shtiker import NewsPage
self.ignore(self.tempLeft)
self.ignore(self.tempRight)
if isinstance(self.pages[self.currPageIndex], NewsPage.NewsPage):
self.ignore(self.tempLeft)
self.ignore(self.tempRight)
else:
self.accept(self.tempRight, self.__pageChange, [1])
self.accept(self.tempLeft, self.__pageChange, [-1])
def goToNewsPage(self, page):
messenger.send('wakeup')
base.playSfx(self.pageSound)
localAvatar.newsButtonMgr.setGoingToNewsPageFromStickerBook(True)
localAvatar.newsButtonMgr.showAppropriateButton()
self.setPage(page)
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Browse tabs %s' % page.pageName)
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
localAvatar.newsButtonMgr.acceptEscapeKeyPress()
def disableBookCloseButton(self):
if self.bookCloseButton:
self.bookCloseButton['command'] = None
return
def enableBookCloseButton(self):
if self.bookCloseButton:
self.bookCloseButton['command'] = self.__close
def disableAllPageTabs(self):
for button in self.pageTabs:
button['state'] = DGG.DISABLED
def enableAllPageTabs(self):
for button in self.pageTabs:
button['state'] = DGG.NORMAL
def __disableHotkeys(self):
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
def __enableHotkeys(self):
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
| apache-2.0 | 5,514,424,025,963,807,000 | 40.94958 | 326 | 0.629689 | false |
harri88/harri88.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/vim.py | 364 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| mit | -4,318,062,144,764,634,000 | 30.365079 | 70 | 0.448887 | false |
jmcorgan/gnuradio | gr-blocks/python/blocks/qa_wavfile.py | 51 | 2216 | #!/usr/bin/env python
#
# Copyright 2008,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import os
from os.path import getsize
g_in_file = os.path.join(os.getenv("srcdir"), "test_16bit_1chunk.wav")
g_extra_header_offset = 36
g_extra_header_len = 18
class test_wavefile(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_checkwavread(self):
wf = blocks.wavfile_source(g_in_file)
self.assertEqual(wf.sample_rate(), 8000)
def test_002_checkwavcopy(self):
infile = g_in_file
outfile = "test_out.wav"
wf_in = blocks.wavfile_source(infile)
wf_out = blocks.wavfile_sink(outfile,
wf_in.channels(),
wf_in.sample_rate(),
wf_in.bits_per_sample())
self.tb.connect(wf_in, wf_out)
self.tb.run()
wf_out.close()
        # we're losing all extra header chunks
self.assertEqual(getsize(infile) - g_extra_header_len, getsize(outfile))
in_f = file(infile, 'rb')
out_f = file(outfile, 'rb')
in_data = in_f.read()
out_data = out_f.read()
out_f.close()
os.remove(outfile)
        # cut extra header chunks from the input file
self.assertEqual(in_data[:g_extra_header_offset] + \
in_data[g_extra_header_offset + g_extra_header_len:], out_data)
if __name__ == '__main__':
gr_unittest.run(test_wavefile, "test_wavefile.xml")
| gpl-3.0 | 3,665,465,583,131,429,400 | 29.777778 | 81 | 0.665162 | false |
LegitSavage/namebench | nb_third_party/graphy/line_chart.py | 205 | 4253 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to line charts."""
import copy
import warnings
from graphy import common
class LineStyle(object):
"""Represents the style for a line on a line chart. Also provides some
convenient presets.
Object attributes (Passed directly to the Google Chart API. Check there for
details):
width: Width of the line
on: Length of a line segment (for dashed/dotted lines)
off: Length of a break (for dashed/dotted lines)
color: Color of the line. A hex string, like 'ff0000' for red. Optional,
AutoColor will fill this in for you automatically if empty.
Some common styles, such as LineStyle.dashed, are available:
LineStyle.solid()
LineStyle.dashed()
LineStyle.dotted()
LineStyle.thick_solid()
LineStyle.thick_dashed()
LineStyle.thick_dotted()
"""
# Widths
THIN = 1
THICK = 2
# Patterns
# ((on, off) tuples, as passed to LineChart.AddLine)
SOLID = (1, 0)
DASHED = (8, 4)
DOTTED = (2, 4)
def __init__(self, width, on, off, color=None):
"""Construct a LineStyle. See class docstring for details on args."""
self.width = width
self.on = on
self.off = off
self.color = color
@classmethod
def solid(cls):
return LineStyle(1, 1, 0)
@classmethod
def dashed(cls):
return LineStyle(1, 8, 4)
@classmethod
def dotted(cls):
return LineStyle(1, 2, 4)
@classmethod
def thick_solid(cls):
return LineStyle(2, 1, 0)
@classmethod
def thick_dashed(cls):
return LineStyle(2, 8, 4)
@classmethod
def thick_dotted(cls):
return LineStyle(2, 2, 4)
class LineChart(common.BaseChart):
"""Represents a line chart."""
def __init__(self, points=None):
super(LineChart, self).__init__()
if points is not None:
self.AddLine(points)
def AddLine(self, points, label=None, color=None,
pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):
"""Add a new line to the chart.
This is a convenience method which constructs the DataSeries and appends it
for you. It returns the new series.
points: List of equally-spaced y-values for the line
label: Name of the line (used for the legend)
color: Hex string, like 'ff0000' for red
pattern: Tuple for (length of segment, length of gap). i.e.
LineStyle.DASHED
width: Width of the line (i.e. LineStyle.THIN)
markers: List of Marker objects to attach to this line (see DataSeries
for more info)
"""
if color is not None and isinstance(color[0], common.Marker):
warnings.warn('Your code may be broken! '
'You passed a list of Markers instead of a color. The '
'old argument order (markers before color) is deprecated.',
DeprecationWarning, stacklevel=2)
style = LineStyle(width, pattern[0], pattern[1], color=color)
series = common.DataSeries(points, label=label, style=style,
markers=markers)
self.data.append(series)
return series
def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,
label=None):
"""DEPRECATED"""
warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',
DeprecationWarning, stacklevel=2)
return self.AddLine(points, color=color, width=style.width,
pattern=(style.on, style.off), markers=markers,
label=label)
class Sparkline(LineChart):
"""Represent a sparkline. These behave like LineCharts,
mostly, but come without axes.
"""
| apache-2.0 | -4,410,045,448,930,538,500 | 30.043796 | 79 | 0.654597 | false |
tianweizhang/nova | nova/virt/libvirt/vif.py | 2 | 27960 | # Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def __init__(self, get_connection):
self.get_connection = get_connection
def _normalize_vif_type(self, vif_type):
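        # Turns a vif type such as '802.1qbg' into '802qbg' so it can be used
        # as a suffix when looking up handler methods (e.g. get_config_802qbg).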
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, vif, image_meta,
inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
vif_model = image_meta.get('properties',
{}).get('hw_vif_model')
if vif_model is not None:
model = vif_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
designer.set_vif_guest_frontend_config(
conf, vif['address'], model, driver)
return conf
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_firewall_required(self, vif):
if vif.is_neutron_filtering_enabled():
return False
if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
return True
return False
def get_config_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
"""Get VIF configurations for bridge type."""
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(vif),
self.get_ovs_interfaceid(vif),
self.get_vif_devname(vif))
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance, newvif, image_meta,
inst_type, virt_type)
def get_config_ovs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ovs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ovs_bridge(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance,
vif,
image_meta,
inst_type,
virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_mlnx_direct(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
designer.set_vif_host_backend_direct_config(conf, devname)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type)
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network = vif['network']
if (not network.get_meta('multi_host', False) and
network.get_meta('should_create_bridge', False)):
if network.get_meta('should_create_vlan', False):
iface = CONF.vlan_interface or \
network.get_meta('bridge_interface')
LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
{'vlan': network.get_meta('vlan'),
'bridge': self.get_bridge_name(vif)},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network.get_meta('vlan'),
self.get_bridge_name(vif),
iface)
else:
iface = CONF.flat_interface or \
network.get_meta('bridge_interface')
LOG.debug("Ensuring bridge %s",
self.get_bridge_name(vif), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(vif),
iface)
def plug_ovs_bridge(self, instance, vif):
"""No manual plugging required."""
pass
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id, vif['address'],
instance['uuid'])
def plug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
else:
self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance['uuid'])
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ivs_vif_port(v2_name, iface_id, vif['address'],
instance['uuid'])
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
device_id = instance['uuid']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net.create_tap_dev(dev)
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
Connect a network device to their respective
Virtual Domain in PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance["project_id"]
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], 'pgtag2=%s' % net_id,
'pgtag1=%s' % tenant_id, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
self.unplug_ovs_bridge(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
try:
utils.execute('ebrctl', 'del-port', fabric,
vnic_mac, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
Delete network device and to their respective
connection to the Virtual Domain in PLUMgrid Platform.
"""
iface_id = vif['id']
dev = self.get_vif_devname(vif)
try:
utils.execute('ifc_ctl', 'gateway', 'ifdown',
dev, 'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
| apache-2.0 | -5,238,624,102,279,983,000 | 39 | 79 | 0.539521 | false |
XXMrHyde/android_external_chromium_org | chrome/test/functional/webrtc_write_wsh.py | 66 | 2397 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This module is a handler for incoming data to the pywebsocket standalone server
# (source is in http://code.google.com/p/pywebsocket/source/browse/trunk/src/).
# It follows the conventions of the pywebsocket server and in our case receives
# and stores incoming frames to disk.
import Queue
import os
import sys
import threading
_NUMBER_OF_WRITER_THREADS = 10
_HOME_ENV_NAME = 'HOMEPATH' if 'win32' == sys.platform else 'HOME'
_WORKING_DIR = os.path.join(os.environ[_HOME_ENV_NAME], 'webrtc_video_quality')
# I couldn't think of any other way to handle this than through a global variable
g_frame_number_counter = 0
g_frames_queue = Queue.Queue()
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
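  # Loop until the peer closes the connection; every received binary frame is
  # queued together with a monotonically increasing frame number.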
while True:
data = request.ws_stream.receive_message()
if data is None:
return
# We assume we will receive only frames, i.e. binary data
global g_frame_number_counter
frame_number = str(g_frame_number_counter)
g_frame_number_counter += 1
g_frames_queue.put((frame_number, data))
class FrameWriterThread(threading.Thread):
"""Writes received frames to disk.
The frames are named in the format frame_xxxx, where xxxx is the 0-padded
frame number. The frames and their numbers are obtained from a synchronized
queue. The frames are written in the directory specified by _WORKING_DIR.
"""
def __init__(self, queue):
threading.Thread.__init__(self)
self._queue = queue
def run(self):
while True:
frame_number, frame_data = self._queue.get()
file_name = 'frame_' + frame_number.zfill(4)
file_name = os.path.join(_WORKING_DIR, file_name)
frame = open(file_name, "wb")
frame.write(frame_data)
frame.close()
self._queue.task_done()
def start_threads():
for i in range(_NUMBER_OF_WRITER_THREADS):
t = FrameWriterThread(g_frames_queue)
t.setDaemon(True)
t.start()
g_frames_queue.join()
# This handler's entire code is imported as-is and then incorporated into the
# code of the standalone pywebsocket server. If we put this start_threads()
# call inside an if __name__ == '__main__' clause, it wouldn't run at all
# (tested).
start_threads()
| bsd-3-clause | -1,515,579,690,475,073,300 | 30.539474 | 80 | 0.705465 | false |
t3dev/odoo | addons/resource/tests/common.py | 15 | 1756 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestResourceCommon(TransactionCase):
def _define_calendar(self, name, attendances, tz):
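        # ``attendances`` is an iterable of (hour_from, hour_to, dayofweek)
        # tuples, one resource.calendar.attendance record per entry.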
return self.env['resource.calendar'].create({
'name': name,
'tz': tz,
'attendance_ids': [
(0, 0, {
'name': '%s_%d' % (name, index),
'hour_from': att[0],
'hour_to': att[1],
'dayofweek': str(att[2]),
})
for index, att in enumerate(attendances)
],
})
def setUp(self):
super(TestResourceCommon, self).setUp()
# UTC+1 winter, UTC+2 summer
self.calendar_jean = self._define_calendar('40 Hours', [(8, 16, i) for i in range(5)], 'Europe/Brussels')
# UTC+6
self.calendar_patel = self._define_calendar('38 Hours', sum([((9, 12, i), (13, 17, i)) for i in range(5)], ()), 'Etc/GMT-6')
# UTC-8 winter, UTC-7 summer
self.calendar_john = self._define_calendar('8+12 Hours', [(8, 16, 1), (8, 13, 4), (16, 23, 4)], 'America/Los_Angeles')
# Employee is linked to a resource.resource via resource.mixin
self.jean = self.env['resource.test'].create({
'name': 'Jean',
'resource_calendar_id': self.calendar_jean.id,
})
self.patel = self.env['resource.test'].create({
'name': 'Patel',
'resource_calendar_id': self.calendar_patel.id,
})
self.john = self.env['resource.test'].create({
'name': 'John',
'resource_calendar_id': self.calendar_john.id,
})
| gpl-3.0 | -9,076,034,270,985,825,000 | 37.173913 | 132 | 0.525057 | false |
bvcms/bvcms | CmsWeb/Lib/multiprocessing/dummy/connection.py | 168 | 2807 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from Queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
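    # ``duplex`` is accepted for API compatibility with the real
    # multiprocessing Pipe but is ignored: both queue-backed ends are always
    # readable and writable.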
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
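# A minimal usage sketch, added for illustration only and not part of the
# original module: the queue-backed Pipe mimics the socket-based API for
# in-process use.
if __name__ == '__main__':
    parent_end, child_end = Pipe()
    parent_end.send('ping')            # enqueues the object on the shared queue
    assert child_end.poll(1.0)         # the other end sees it immediately
    assert child_end.recv() == 'ping'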
| gpl-2.0 | 4,601,270,643,949,414,400 | 31.264368 | 79 | 0.691842 | false |
faun/django_test | build/lib/django/contrib/gis/db/backends/oracle/models.py | 310 | 2184 | """
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the
the feature table name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
        Returns the name of the metadata column used to store the
        feature geometry column.
"""
return 'column_name'
def __unicode__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
| bsd-3-clause | 4,523,643,666,607,054,000 | 32.6 | 84 | 0.681319 | false |
lulufei/youtube-dl | test/test_YoutubeDL.py | 24 | 20870 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_str
from youtube_dl.extractor import YoutubeIE
from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import match_filter_func
TEST_URL = 'http://localhost/sample.mp4'
class YDL(FakeYDL):
def __init__(self, *args, **kwargs):
super(YDL, self).__init__(*args, **kwargs)
self.downloaded_info_dicts = []
self.msgs = []
def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict)
def to_screen(self, msg):
self.msgs.append(msg)
def _make_result(formats, **kwargs):
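    # Build a minimal info_dict skeleton around the given formats so each test
    # below only has to vary the 'formats' list (and optionally override keys).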
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
def test_prefer_free_formats(self):
# Same resolution => download webm
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 460, 'url': TEST_URL},
{'ext': 'mp4', 'height': 460, 'url': TEST_URL},
]
info_dict = _make_result(formats)
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'webm')
# Different resolution => download best quality (mp4)
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
# No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 720, 'url': TEST_URL},
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'flv')
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': '20/47'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '20/71/worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL()
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '2')
ydl = YDL({'format': 'webm/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '3gp/40/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-high')
ydl = YDL({'format': 'worstaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-low')
formats = [
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-high')
def test_format_selection_audio_exts(self):
formats = [
{'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'aac-64')
ydl = YDL({'format': 'mp3'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
ydl = YDL({'prefer_free_formats': True})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'ogg-64')
def test_format_selection_video(self):
formats = [
{'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-high')
ydl = YDL({'format': 'worstvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
def test_youtube_format_selection(self):
order = [
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
'85', '84', '102', '83', '101', '82', '100',
# Dash video
'137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
'141', '172', '140', '171', '139',
]
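        # Every adjacent pair in ``order`` must be ranked in that order by the
        # 'best/bestvideo' selector, whichever way round the formats are given.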
for f1id, f2id in zip(order, order[1:]):
f1 = YoutubeIE._formats[f1id].copy()
f1['format_id'] = f1id
f1['url'] = 'url:' + f1id
f2 = YoutubeIE._formats[f2id].copy()
f2['format_id'] = f2id
f2['url'] = 'url:' + f2id
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
def test_format_filtering(self):
formats = [
{'format_id': 'A', 'filesize': 500, 'width': 1000},
{'format_id': 'B', 'filesize': 1000, 'width': 500},
{'format_id': 'C', 'filesize': 1000, 'width': 400},
{'format_id': 'D', 'filesize': 2000, 'width': 600},
{'format_id': 'E', 'filesize': 3000},
{'format_id': 'F'},
{'format_id': 'G', 'filesize': 1000000},
]
for f in formats:
f['url'] = 'http://_/'
f['ext'] = 'unknown'
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[filesize<3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'D')
ydl = YDL({'format': 'best[filesize<=3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': 'best[filesize <= ? 3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'F')
ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'B')
ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'C')
ydl = YDL({'format': '[filesize>?1]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': '[filesize<1M]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': '[filesize<1MiB]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
class TestYoutubeDL(unittest.TestCase):
def test_subtitles(self):
def s_formats(lang, autocaption=False):
return [{
'ext': ext,
'url': 'http://localhost/video.%s.%s' % (lang, ext),
'_auto': autocaption,
} for ext in ['vtt', 'srt', 'ass']]
subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
info_dict = {
'id': 'test',
'title': 'Test',
'url': 'http://localhost/video.mp4',
'subtitles': subtitles,
'automatic_captions': auto_captions,
'extractor': 'TEST',
}
def get_info(params={}):
params.setdefault('simulate', True)
ydl = YDL(params)
ydl.report_warning = lambda *args, **kargs: None
return ydl.process_video_result(info_dict, download=False)
result = get_info()
self.assertFalse(result.get('requested_subtitles'))
self.assertEqual(result['subtitles'], subtitles)
self.assertEqual(result['automatic_captions'], auto_captions)
result = get_info({'writesubtitles': True})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['en']))
self.assertTrue(subs['en'].get('data') is None)
self.assertEqual(subs['en']['ext'], 'ass')
result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
subs = result['requested_subtitles']
self.assertEqual(subs['en']['ext'], 'srt')
result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertFalse(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertTrue(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
def test_add_extra_info(self):
test_dict = {
'extractor': 'Foo',
}
extra_info = {
'extractor': 'Bar',
'playlist': 'funny videos',
}
YDL.add_extra_info(test_dict, extra_info)
self.assertEqual(test_dict['extractor'], 'Foo')
self.assertEqual(test_dict['playlist'], 'funny videos')
def test_prepare_filename(self):
info = {
'id': '1234',
'ext': 'mp4',
'width': None,
}
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
# Replace missing fields with 'NA'
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
def test_format_note(self):
ydl = YoutubeDL()
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
}), '^\s*10k$')
def test_postprocessors(self):
filename = 'post-processor-testfile.mp4'
audiofile = filename + '.mp3'
class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'wt') as f:
f.write('EXAMPLE')
return [info['filepath']], info
def run_pp(params, PP):
with open(filename, 'wt') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(PP())
ydl.post_process(filename, {'filepath': filename})
run_pp({'keepvideo': True}, SimplePP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(filename)
os.unlink(audiofile)
run_pp({'keepvideo': False}, SimplePP)
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(audiofile)
class ModifierPP(PostProcessor):
def run(self, info):
with open(info['filepath'], 'wt') as f:
f.write('MODIFIED')
return [], info
run_pp({'keepvideo': False}, ModifierPP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
os.unlink(filename)
def test_match_filter(self):
class FilterYDL(YDL):
def __init__(self, *args, **kwargs):
super(FilterYDL, self).__init__(*args, **kwargs)
self.params['simulate'] = True
def process_info(self, info_dict):
super(YDL, self).process_info(info_dict)
def _match_entry(self, info_dict, incomplete):
res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
if res is None:
self.downloaded_info_dicts.append(info_dict)
return res
first = {
'id': '1',
'url': TEST_URL,
'title': 'one',
'extractor': 'TEST',
'duration': 30,
'filesize': 10 * 1024,
}
second = {
'id': '2',
'url': TEST_URL,
'title': 'two',
'extractor': 'TEST',
'duration': 10,
'description': 'foo',
'filesize': 5 * 1024,
}
videos = [first, second]
def get_videos(filter_=None):
ydl = FilterYDL({'match_filter': filter_})
for v in videos:
ydl.process_ie_result(v, download=True)
return [v['id'] for v in ydl.downloaded_info_dicts]
res = get_videos()
self.assertEqual(res, ['1', '2'])
def f(v):
if v['id'] == '1':
return None
else:
return 'Video id is not 1'
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('duration < 30')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description = foo')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description =? foo')
res = get_videos(f)
self.assertEqual(res, ['1', '2'])
f = match_filter_func('filesize > 5KiB')
res = get_videos(f)
self.assertEqual(res, ['1'])
def test_playlist_items_selection(self):
entries = [{
'id': compat_str(i),
'title': compat_str(i),
'url': TEST_URL,
} for i in range(1, 5)]
playlist = {
'_type': 'playlist',
'id': 'test',
'entries': entries,
'extractor': 'test:playlist',
'extractor_key': 'test:playlist',
'webpage_url': 'http://example.com',
}
def get_ids(params):
ydl = YDL(params)
# make a copy because the dictionary can be modified
ydl.process_ie_result(playlist.copy())
return [int(v['id']) for v in ydl.downloaded_info_dicts]
result = get_ids({})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 10})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 2})
self.assertEqual(result, [1, 2])
result = get_ids({'playliststart': 10})
self.assertEqual(result, [])
result = get_ids({'playliststart': 2})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2-4'})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2,4'})
self.assertEqual(result, [2, 4])
result = get_ids({'playlist_items': '10'})
self.assertEqual(result, [])
if __name__ == '__main__':
unittest.main()
| unlicense | -5,176,356,957,062,949,000 | 36.401434 | 111 | 0.536033 | false |
lochiiconnectivity/boto | boto/ec2/reservedinstance.py | 12 | 8480 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
class ReservedInstancesOffering(EC2Object):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None, instance_tenancy=None,
currency_code=None, offering_type=None,
recurring_charges=None, pricing_details=None):
EC2Object.__init__(self, connection)
self.id = id
self.instance_type = instance_type
self.availability_zone = availability_zone
self.duration = duration
self.fixed_price = fixed_price
self.usage_price = usage_price
self.description = description
self.instance_tenancy = instance_tenancy
self.currency_code = currency_code
self.offering_type = offering_type
self.recurring_charges = recurring_charges
self.pricing_details = pricing_details
def __repr__(self):
return 'ReservedInstanceOffering:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'recurringCharges':
self.recurring_charges = ResultSet([('item', RecurringCharge)])
return self.recurring_charges
elif name == 'pricingDetailsSet':
self.pricing_details = ResultSet([('item', PricingDetail)])
return self.pricing_details
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesOfferingId':
self.id = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'duration':
self.duration = int(value)
elif name == 'fixedPrice':
self.fixed_price = value
elif name == 'usagePrice':
self.usage_price = value
elif name == 'productDescription':
self.description = value
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'offeringType':
self.offering_type = value
elif name == 'marketplace':
self.marketplace = True if value == 'true' else False
def describe(self):
print 'ID=%s' % self.id
print '\tInstance Type=%s' % self.instance_type
print '\tZone=%s' % self.availability_zone
print '\tDuration=%s' % self.duration
print '\tFixed Price=%s' % self.fixed_price
print '\tUsage Price=%s' % self.usage_price
print '\tDescription=%s' % self.description
def purchase(self, instance_count=1):
return self.connection.purchase_reserved_instance_offering(self.id, instance_count)
class RecurringCharge(object):
def __init__(self, connection=None, frequency=None, amount=None):
self.frequency = frequency
self.amount = amount
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class PricingDetail(object):
def __init__(self, connection=None, price=None, count=None):
self.price = price
self.count = count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class ReservedInstance(ReservedInstancesOffering):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None,
instance_count=None, state=None):
ReservedInstancesOffering.__init__(self, connection, id, instance_type,
availability_zone, duration, fixed_price,
usage_price, description)
self.instance_count = instance_count
self.state = state
self.start = None
def __repr__(self):
return 'ReservedInstance:%s' % self.id
def endElement(self, name, value, connection):
if name == 'reservedInstancesId':
self.id = value
if name == 'instanceCount':
self.instance_count = int(value)
elif name == 'state':
self.state = value
elif name == 'start':
self.start = value
else:
ReservedInstancesOffering.endElement(self, name, value, connection)
class ReservedInstanceListing(EC2Object):
def __init__(self, connection=None, listing_id=None, id=None,
create_date=None, update_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.listing_id = listing_id
self.id = id
self.create_date = create_date
self.update_date = update_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'instanceCounts':
self.instance_counts = ResultSet([('item', InstanceCount)])
return self.instance_counts
elif name == 'priceSchedules':
self.price_schedules = ResultSet([('item', PriceSchedule)])
return self.price_schedules
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesListingId':
self.listing_id = value
elif name == 'reservedInstancesId':
self.id = value
elif name == 'createDate':
self.create_date = value
elif name == 'updateDate':
self.update_date = value
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
else:
setattr(self, name, value)
class InstanceCount(object):
def __init__(self, connection=None, state=None, instance_count=None):
self.state = state
self.instance_count = instance_count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'state':
self.state = value
elif name == 'instanceCount':
self.instance_count = int(value)
else:
setattr(self, name, value)
class PriceSchedule(object):
def __init__(self, connection=None, term=None, price=None,
currency_code=None, active=None):
self.connection = connection
self.term = term
self.price = price
self.currency_code = currency_code
self.active = active
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'term':
self.term = int(value)
elif name == 'price':
self.price = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'active':
self.active = True if value == 'true' else False
else:
setattr(self, name, value)
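# Illustrative note, not part of the original boto source: these classes are
# populated by boto's SAX-style response parser, which calls startElement()
# when a nested result set opens and endElement() for each scalar field, so a
# <recurringCharges> item ends up as a RecurringCharge with .frequency and
# .amount filled in.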
| mit | 1,843,460,482,225,525,800 | 36.356828 | 91 | 0.620637 | false |
jyejare/robottelo | tests/foreman/cli/test_repository.py | 1 | 102147 | # -*- encoding: utf-8 -*-
"""Test class for Repository CLI
:Requirement: Repository
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Repositories
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from fauxfactory import gen_alphanumeric
from fauxfactory import gen_string
from nailgun import entities
from wait_for import wait_for
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contentview import ContentView
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_content_view
from robottelo.cli.factory import make_filter
from robottelo.cli.factory import make_gpg_key
from robottelo.cli.factory import make_lifecycle_environment
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_product_wait
from robottelo.cli.factory import make_repository
from robottelo.cli.factory import make_role
from robottelo.cli.factory import make_user
from robottelo.cli.file import File
from robottelo.cli.filter import Filter
from robottelo.cli.module_stream import ModuleStream
from robottelo.cli.package import Package
from robottelo.cli.puppetmodule import PuppetModule
from robottelo.cli.repository import Repository
from robottelo.cli.role import Role
from robottelo.cli.settings import Settings
from robottelo.cli.srpm import Srpm
from robottelo.cli.task import Task
from robottelo.cli.user import User
from robottelo.constants import CUSTOM_FILE_REPO
from robottelo.constants import CUSTOM_FILE_REPO_FILES_COUNT
from robottelo.constants import CUSTOM_LOCAL_FOLDER
from robottelo.constants import CUSTOM_MODULE_STREAM_REPO_1
from robottelo.constants import CUSTOM_MODULE_STREAM_REPO_2
from robottelo.constants import DOCKER_REGISTRY_HUB
from robottelo.constants import DOWNLOAD_POLICIES
from robottelo.constants import FAKE_0_YUM_REPO
from robottelo.constants import FAKE_1_PUPPET_REPO
from robottelo.constants import FAKE_1_YUM_REPO
from robottelo.constants import FAKE_2_PUPPET_REPO
from robottelo.constants import FAKE_2_YUM_REPO
from robottelo.constants import FAKE_3_PUPPET_REPO
from robottelo.constants import FAKE_3_YUM_REPO
from robottelo.constants import FAKE_4_PUPPET_REPO
from robottelo.constants import FAKE_4_YUM_REPO
from robottelo.constants import FAKE_5_PUPPET_REPO
from robottelo.constants import FAKE_5_YUM_REPO
from robottelo.constants import FAKE_7_PUPPET_REPO
from robottelo.constants import FAKE_PULP_REMOTE_FILEREPO
from robottelo.constants import FAKE_YUM_DRPM_REPO
from robottelo.constants import FAKE_YUM_MIXED_REPO
from robottelo.constants import FAKE_YUM_SRPM_REPO
from robottelo.constants import FEDORA27_OSTREE_REPO
from robottelo.constants import OS_TEMPLATE_DATA_FILE
from robottelo.constants import REPO_TYPE
from robottelo.constants import RPM_TO_UPLOAD
from robottelo.constants import SRPM_TO_UPLOAD
from robottelo.datafactory import invalid_http_credentials
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import valid_data_list
from robottelo.datafactory import valid_docker_repository_names
from robottelo.datafactory import valid_http_credentials
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.decorators.host import skip_if_os
from robottelo.helpers import get_data_file
from robottelo.host_info import get_host_os_version
from robottelo.test import CLITestCase
from robottelo.utils.issue_handlers import is_open
class RepositoryTestCase(CLITestCase):
"""Repository CLI tests."""
org = None
product = None
def setUp(self):
"""Tests for Repository via Hammer CLI"""
super(RepositoryTestCase, self).setUp()
if RepositoryTestCase.org is None:
RepositoryTestCase.org = make_org(cached=True)
if RepositoryTestCase.product is None:
RepositoryTestCase.product = make_product_wait(
{'organization-id': RepositoryTestCase.org['id']}
)
def _make_repository(self, options=None):
"""Makes a new repository and asserts its success"""
if options is None:
options = {}
if options.get('product-id') is None:
options['product-id'] = self.product['id']
return make_repository(options)
def _get_image_tags_count(self, repo=None):
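        """Return Repository.info() output for ``repo``; the tag count lives
        under content-counts/container-image-tags."""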
repo_detail = Repository.info({'id': repo['id']})
return repo_detail
def _validated_image_tags_count(self, repo=None):
"""Wrapper around Repository.info(), that returns once
container-image-tags in repo is greater than 0.
Needed due to BZ#1664631 (container-image-tags is not populated
immediately after synchronization), which was CLOSED WONTFIX
"""
wait_for(
lambda: int(
self._get_image_tags_count(repo=repo)['content-counts']['container-image-tags']
)
> 0,
timeout=30,
delay=2,
logger=self.logger,
)
return self._get_image_tags_count(repo=repo)
@tier1
@upgrade
def test_positive_info_docker_upstream_name(self):
"""Check if repository docker-upstream-name is shown
in repository info
:id: f197a14c-2cf3-4564-9b18-5fd37d469ea4
:expectedresults: repository info command returns upstream-repository-
name value
:BZ: 1189289
:CaseImportance: Critical
"""
repository = self._make_repository(
{
'content-type': 'docker',
'name': gen_string('alpha'),
'docker-upstream-name': 'fedora/rabbitmq',
}
)
self.assertIn('upstream-repository-name', repository)
self.assertEqual(repository['upstream-repository-name'], 'fedora/rabbitmq')
@tier1
def test_positive_create_with_name(self):
"""Check if repository can be created with random names
:id: 604dea2c-d512-4a27-bfc1-24c9655b6ea9
:expectedresults: Repository is created and has random name
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
self.assertEqual(new_repo['name'], name)
@tier1
def test_positive_create_with_name_label(self):
"""Check if repository can be created with random names and
labels
:id: 79d2a6d0-5032-46cd-880c-46cf392521fa
:expectedresults: Repository is created and has random name and labels
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
# Generate a random, 'safe' label
label = gen_string('alpha', 20)
new_repo = self._make_repository({'label': label, 'name': name})
self.assertEqual(new_repo['name'], name)
self.assertEqual(new_repo['label'], label)
@tier1
def test_positive_create_with_yum_repo(self):
"""Create YUM repository
:id: 4c08824f-ba95-486c-94c2-9abf0a3441ea
:expectedresults: YUM repository is created
:CaseImportance: Critical
"""
for url in (
FAKE_0_YUM_REPO,
FAKE_1_YUM_REPO,
FAKE_2_YUM_REPO,
FAKE_3_YUM_REPO,
FAKE_4_YUM_REPO,
):
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'yum', 'url': url})
self.assertEqual(new_repo['url'], url)
self.assertEqual(new_repo['content-type'], 'yum')
@tier1
@upgrade
def test_positive_create_with_puppet_repo(self):
"""Create Puppet repository
:id: 75c309ba-fbc9-419d-8427-7a61b063ec13
:expectedresults: Puppet repository is created
:CaseImportance: Critical
"""
for url in (
FAKE_1_PUPPET_REPO,
FAKE_2_PUPPET_REPO,
FAKE_3_PUPPET_REPO,
FAKE_4_PUPPET_REPO,
FAKE_5_PUPPET_REPO,
):
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url})
self.assertEqual(new_repo['url'], url)
self.assertEqual(new_repo['content-type'], 'puppet')
@tier1
@upgrade
def test_positive_create_with_file_repo(self):
"""Create file repository
:id: 46f63419-1acc-4ae2-be8c-d97816ba342f
:expectedresults: file repository is created
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
self.assertEqual(new_repo['url'], CUSTOM_FILE_REPO)
self.assertEqual(new_repo['content-type'], 'file')
@tier1
def test_positive_create_with_auth_yum_repo(self):
"""Create YUM repository with basic HTTP authentication
:id: da8309fd-3076-427b-a96f-8d883d6e944f
:expectedresults: YUM repository is created
:CaseImportance: Critical
"""
url = FAKE_5_YUM_REPO
for creds in valid_http_credentials(url_encoded=True):
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
self.assertEqual(new_repo['url'], url_encoded)
self.assertEqual(new_repo['content-type'], 'yum')
@tier1
@upgrade
def test_positive_create_with_download_policy(self):
"""Create YUM repositories with available download policies
:id: ffb386e6-c360-4d4b-a324-ccc21768b4f8
:expectedresults: YUM repository with a download policy is created
:CaseImportance: Critical
"""
for policy in DOWNLOAD_POLICIES:
with self.subTest(policy):
new_repo = self._make_repository(
{'content-type': 'yum', 'download-policy': policy}
)
self.assertEqual(new_repo['download-policy'], policy)
@tier1
@upgrade
def test_positive_create_with_mirror_on_sync(self):
"""Create YUM repositories with available mirror on sync rule
:id: 37a09a91-42fc-4271-b58b-8e00ef0dc5a7
:expectedresults: YUM repository created successfully and its mirror on
sync rule value can be read back
:BZ: 1383258
:CaseImportance: Critical
"""
for value in ['yes', 'no']:
with self.subTest(value):
new_repo = self._make_repository({'content-type': 'yum', 'mirror-on-sync': value})
self.assertEqual(new_repo['mirror-on-sync'], value)
@tier1
def test_positive_create_with_default_download_policy(self):
"""Verify if the default download policy is assigned when creating a
YUM repo without `--download-policy`
:id: 9a3c4d95-d6ca-4377-9873-2c552b7d6ce7
:expectedresults: YUM repository with a default download policy
:CaseImportance: Critical
"""
default_dl_policy = Settings.list({'search': 'name=default_download_policy'})
self.assertTrue(default_dl_policy)
new_repo = self._make_repository({'content-type': 'yum'})
self.assertEqual(new_repo['download-policy'], default_dl_policy[0]['value'])
@tier1
def test_positive_create_immediate_update_to_on_demand(self):
"""Update `immediate` download policy to `on_demand` for a newly
created YUM repository
:id: 1a80d686-3f7b-475e-9d1a-3e1f51d55101
:expectedresults: immediate download policy is updated to on_demand
:CaseImportance: Critical
:BZ: 1732056
"""
new_repo = self._make_repository({'content-type': 'yum'})
self.assertEqual(new_repo['download-policy'], 'immediate')
Repository.update({'id': new_repo['id'], 'download-policy': 'on_demand'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'on_demand')
@tier1
def test_positive_create_immediate_update_to_background(self):
"""Update `immediate` download policy to `background` for a newly
created YUM repository
:id: 7a9243eb-012c-40ad-9105-b078ed0a9eda
:expectedresults: immediate download policy is updated to background
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'immediate'})
Repository.update({'id': new_repo['id'], 'download-policy': 'background'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'background')
@tier1
def test_positive_create_on_demand_update_to_immediate(self):
"""Update `on_demand` download policy to `immediate` for a newly
created YUM repository
:id: 1e8338af-32e5-4f92-9215-bfdc1973c8f7
:expectedresults: on_demand download policy is updated to immediate
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'on_demand'})
Repository.update({'id': new_repo['id'], 'download-policy': 'immediate'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'immediate')
@tier1
def test_positive_create_on_demand_update_to_background(self):
"""Update `on_demand` download policy to `background` for a newly
created YUM repository
:id: da600200-5bd4-4cb8-a891-37cd2233803e
:expectedresults: on_demand download policy is updated to background
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'on_demand'})
Repository.update({'id': new_repo['id'], 'download-policy': 'background'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'background')
@tier1
def test_positive_create_background_update_to_immediate(self):
"""Update `background` download policy to `immediate` for a newly
created YUM repository
:id: cf4dca0c-36bd-4a3c-aa29-f435ac60b3f8
:expectedresults: background download policy is updated to immediate
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'background'})
Repository.update({'id': new_repo['id'], 'download-policy': 'immediate'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'immediate')
@tier1
def test_positive_create_background_update_to_on_demand(self):
"""Update `background` download policy to `on_demand` for a newly
created YUM repository
:id: 0f943e3d-44b7-4b6e-9a7d-d33f7f4864d1
:expectedresults: background download policy is updated to on_demand
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'background'})
Repository.update({'id': new_repo['id'], 'download-policy': 'on_demand'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'on_demand')
@tier1
def test_positive_create_with_auth_puppet_repo(self):
"""Create Puppet repository with basic HTTP authentication
:id: b13f8ae2-60ab-47e6-a096-d3f368e5cab3
:expectedresults: Puppet repository is created
:CaseImportance: Critical
"""
url = FAKE_7_PUPPET_REPO
for creds in valid_http_credentials(url_encoded=True):
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url_encoded})
self.assertEqual(new_repo['url'], url_encoded)
self.assertEqual(new_repo['content-type'], 'puppet')
@tier1
@upgrade
def test_positive_create_with_gpg_key_by_id(self):
"""Check if repository can be created with gpg key ID
:id: 6d22f0ea-2d27-4827-9b7a-3e1550a47285
:expectedresults: Repository is created and has gpg key
:CaseImportance: Critical
"""
# Make a new gpg key
gpg_key = make_gpg_key({'organization-id': self.org['id']})
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'gpg-key-id': gpg_key['id'], 'name': name})
self.assertEqual(new_repo['gpg-key']['id'], gpg_key['id'])
self.assertEqual(new_repo['gpg-key']['name'], gpg_key['name'])
@tier1
def test_positive_create_with_gpg_key_by_name(self):
"""Check if repository can be created with gpg key name
:id: 95cde404-3449-410d-9a08-d7f8619a2ad5
:expectedresults: Repository is created and has gpg key
:BZ: 1103944
:CaseImportance: Critical
"""
gpg_key = make_gpg_key({'organization-id': self.org['id']})
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository(
{'gpg-key': gpg_key['name'], 'name': name, 'organization-id': self.org['id']}
)
self.assertEqual(new_repo['gpg-key']['id'], gpg_key['id'])
self.assertEqual(new_repo['gpg-key']['name'], gpg_key['name'])
@tier1
def test_positive_create_publish_via_http(self):
"""Create repository published via http
:id: faf6058c-9dd3-444c-ace2-c41791669e9e
:expectedresults: Repository is created and is published via http
:CaseImportance: Critical
"""
for use_http in 'true', 'yes', '1':
with self.subTest(use_http):
repo = self._make_repository({'publish-via-http': use_http})
self.assertEqual(repo['publish-via-http'], 'yes')
@tier1
def test_positive_create_publish_via_https(self):
"""Create repository not published via http
:id: 4395a5df-207c-4b34-a42d-7b3273bd68ec
:expectedresults: Repository is created and is not published via http
:CaseImportance: Critical
"""
for use_http in 'false', 'no', '0':
with self.subTest(use_http):
repo = self._make_repository({'publish-via-http': use_http})
self.assertEqual(repo['publish-via-http'], 'no')
@tier1
@upgrade
def test_positive_create_yum_repo_with_checksum_type(self):
"""Create a YUM repository with a checksum type
:id: 934f4a09-2a64-485d-ae6c-8ef73aa8fb2b
:expectedresults: A YUM repository is created and contains the correct
checksum type
:CaseImportance: Critical
"""
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
content_type = 'yum'
repository = self._make_repository(
{
'checksum-type': checksum_type,
'content-type': content_type,
'download-policy': 'immediate',
}
)
self.assertEqual(repository['content-type'], content_type)
self.assertEqual(repository['checksum-type'], checksum_type)
@tier1
def test_positive_create_docker_repo_with_upstream_name(self):
"""Create a Docker repository with upstream name.
:id: 776f92eb-8b40-4efd-8315-4fbbabcb2d4e
:expectedresults: Docker repository is created and contains correct
values.
:CaseImportance: Critical
"""
content_type = 'docker'
new_repo = self._make_repository(
{
'content-type': content_type,
'docker-upstream-name': 'busybox',
'name': 'busybox',
'url': DOCKER_REGISTRY_HUB,
}
)
# Assert that urls and content types matches data passed
self.assertEqual(new_repo['url'], DOCKER_REGISTRY_HUB)
self.assertEqual(new_repo['content-type'], content_type)
self.assertEqual(new_repo['name'], 'busybox')
@tier1
def test_positive_create_docker_repo_with_name(self):
"""Create a Docker repository with a random name.
:id: b6a01434-8672-4196-b61a-dcb86c49f43b
:expectedresults: Docker repository is created and contains correct
values.
:CaseImportance: Critical
"""
for name in valid_docker_repository_names():
with self.subTest(name):
content_type = 'docker'
new_repo = self._make_repository(
{
'content-type': content_type,
'docker-upstream-name': 'busybox',
'name': name,
'url': DOCKER_REGISTRY_HUB,
}
)
# Assert that urls, content types and name matches data passed
self.assertEqual(new_repo['url'], DOCKER_REGISTRY_HUB)
self.assertEqual(new_repo['content-type'], content_type)
self.assertEqual(new_repo['name'], name)
@tier2
def test_positive_create_puppet_repo_same_url_different_orgs(self):
"""Create two repos with the same URL in two different organizations.
:id: b3502064-f400-4e60-a11f-b3772bd23a98
:expectedresults: Repositories are created and puppet modules are
visible from different organizations.
:CaseLevel: Integration
"""
url = 'https://omaciel.fedorapeople.org/b3502064/'
# Create first repo
repo = self._make_repository({'content-type': 'puppet', 'url': url})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '1')
# Create another org and repo
org = make_org()
product = make_product({'organization-id': org['id']})
new_repo = self._make_repository(
{'url': url, 'product': product, 'content-type': 'puppet'}
)
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['content-counts']['puppet-modules'], '1')
@tier1
def test_negative_create_with_name(self):
"""Repository name cannot be 300-characters long
:id: af0652d3-012d-4846-82ac-047918f74722
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
for name in invalid_values_list():
with self.subTest(name):
with self.assertRaises(CLIFactoryError):
self._make_repository({'name': name})
@tier1
def test_negative_create_with_auth_url_with_special_characters(self):
"""Verify that repository URL cannot contain unquoted special characters
:id: 2bd5ee17-0fe5-43cb-9cdc-dc2178c5374c
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
# get a list of valid credentials without quoting them
for cred in [creds for creds in valid_http_credentials() if creds['quote'] is True]:
url_encoded = FAKE_5_YUM_REPO.format(cred['login'], cred['pass'])
with self.subTest(url_encoded):
with self.assertRaises(CLIFactoryError):
self._make_repository({'url': url_encoded})
@tier1
def test_negative_create_with_auth_url_too_long(self):
"""Verify that repository URL length is limited
:id: de356c66-4237-4421-89e3-f4f8bbe6f526
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
for cred in invalid_http_credentials():
url_encoded = FAKE_5_YUM_REPO.format(cred['login'], cred['pass'])
with self.subTest(url_encoded):
with self.assertRaises(CLIFactoryError):
self._make_repository({'url': url_encoded})
@tier1
def test_negative_create_with_invalid_download_policy(self):
"""Verify that YUM repository cannot be created with invalid download
policy
:id: 3b143bf8-7056-4c94-910d-69a451071f26
:expectedresults: YUM repository is not created with invalid download
policy
:CaseImportance: Critical
"""
with self.assertRaises(CLIFactoryError):
self._make_repository(
{'content-type': 'yum', 'download-policy': gen_string('alpha', 5)}
)
@tier1
def test_negative_update_to_invalid_download_policy(self):
"""Verify that YUM repository cannot be updated to invalid download
policy
:id: 5bd6a2e4-7ff0-42ac-825a-6b2a2f687c89
:expectedresults: YUM repository is not updated to invalid download
policy
:CaseImportance: Critical
"""
with self.assertRaises(CLIReturnCodeError):
new_repo = self._make_repository({'content-type': 'yum'})
Repository.update({'id': new_repo['id'], 'download-policy': gen_string('alpha', 5)})
@tier1
def test_negative_create_non_yum_with_download_policy(self):
"""Verify that non-YUM repositories cannot be created with download
policy
:id: 71388973-50ea-4a20-9406-0aca142014ca
:expectedresults: Non-YUM repository is not created with a download
policy
:BZ: 1439835
:CaseImportance: Critical
"""
os_version = get_host_os_version()
        # ostree is not supported on RHEL 6, hence the check below excludes it
if os_version.startswith('RHEL6'):
non_yum_repo_types = [
item for item in REPO_TYPE.keys() if item != 'yum' and item != 'ostree'
]
else:
non_yum_repo_types = [item for item in REPO_TYPE.keys() if item != 'yum']
for content_type in non_yum_repo_types:
with self.subTest(content_type):
with self.assertRaisesRegex(
CLIFactoryError,
'Download policy Cannot set attribute download_policy for content type',
):
self._make_repository(
{'content-type': content_type, 'download-policy': 'on_demand'}
)
@tier1
def test_positive_synchronize_yum_repo(self):
"""Check if repository can be created and synced
:id: e3a62529-edbd-4062-9246-bef5f33bdcf0
:expectedresults: Repository is created and synced
:CaseLevel: Integration
:CaseImportance: Critical
"""
for url in FAKE_1_YUM_REPO, FAKE_3_YUM_REPO, FAKE_4_YUM_REPO:
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'yum', 'url': url})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier1
def test_positive_synchronize_file_repo(self):
"""Check if repository can be created and synced
:id: eafc421d-153e-41e1-afbd-938e556ef827
:expectedresults: Repository is created and synced
:CaseLevel: Integration
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
self.assertEqual(int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT)
@tier2
@upgrade
def test_positive_synchronize_auth_yum_repo(self):
"""Check if secured repository can be created and synced
:id: b0db676b-e0f0-428c-adf3-1d7c0c3599f0
:expectedresults: Repository is created and synced
:BZ: 1328092
:CaseLevel: Integration
"""
url = FAKE_5_YUM_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
def test_negative_synchronize_auth_yum_repo(self):
"""Check if secured repo fails to synchronize with invalid credentials
:id: 809905ae-fb76-465d-9468-1f99c4274aeb
:expectedresults: Repository is created but synchronization fails
:BZ: 1405503, 1453118
:CaseLevel: Integration
"""
url = FAKE_5_YUM_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if not cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
# Try to synchronize it
repo_sync = Repository.synchronize({'id': new_repo['id'], 'async': True})
response = Task.progress({'id': repo_sync[0]['id']}, return_raw_response=True)
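                # Inspect the task output for the expected failure; the error
                # message differs depending on how the credentials were encoded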
if creds['original_encoding'] == 'utf8':
self.assertIn(
("Error retrieving metadata: 'latin-1' codec can't encode characters"),
''.join(response.stderr),
)
else:
self.assertIn(
'Error retrieving metadata: Unauthorized', ''.join(response.stderr)
)
@tier2
@upgrade
def test_positive_synchronize_auth_puppet_repo(self):
"""Check if secured puppet repository can be created and synced
:id: 1d2604fc-8a18-4cbe-bf4c-5c7d9fbdb82c
:expectedresults: Repository is created and synced
:BZ: 1405503
:CaseLevel: Integration
"""
url = FAKE_7_PUPPET_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url_encoded})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
@upgrade
def test_positive_synchronize_docker_repo(self):
"""Check if Docker repository can be created and synced
:id: cb9ae788-743c-4785-98b2-6ae0c161bc9a
:expectedresults: Docker repository is created and synced
"""
new_repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'busybox',
'url': DOCKER_REGISTRY_HUB,
}
)
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
@upgrade
def test_positive_synchronize_docker_repo_with_tags_whitelist(self):
"""Check if only whitelisted tags are synchronized
:id: aa820c65-2de1-4b32-8890-98bd8b4320dc
:expectedresults: Only whitelisted tag is synchronized
"""
tags = 'latest'
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': tags,
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertIn(tags, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 1)
@tier2
def test_positive_synchronize_docker_repo_set_tags_later(self):
"""Verify that adding tags whitelist and re-syncing after
synchronizing full repository doesn't remove content that was
already pulled in
:id: 97f2087f-6041-4242-8b7c-be53c68f46ff
:expectedresults: Non-whitelisted tags are not removed
"""
tags = 'latest'
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'hello-world',
'url': DOCKER_REGISTRY_HUB,
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertFalse(repo['container-image-tags-filter'])
self.assertGreaterEqual(int(repo['content-counts']['container-image-tags']), 2)
Repository.update({'id': repo['id'], 'docker-tags-whitelist': tags})
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertIn(tags, repo['container-image-tags-filter'])
self.assertGreaterEqual(int(repo['content-counts']['container-image-tags']), 2)
@tier2
def test_negative_synchronize_docker_repo_with_mix_valid_invalid_tags(self):
"""Set tags whitelist to contain both valid and invalid (non-existing)
tags. Check if only whitelisted tags are synchronized
:id: 75668da8-cc94-4d39-ade1-d3ef91edc812
:expectedresults: Only whitelisted tag is synchronized
"""
tags = ['latest', gen_string('alpha')]
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': ",".join(tags),
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
        for tag in tags:
            self.assertIn(tag, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 1)
@tier2
def test_negative_synchronize_docker_repo_with_invalid_tags(self):
"""Set tags whitelist to contain only invalid (non-existing)
tags. Check that no data is synchronized.
:id: da05cdb1-2aea-48b9-9424-6cc700bc1194
:expectedresults: Tags are not synchronized
"""
tags = [gen_string('alpha') for _ in range(3)]
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': ",".join(tags),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
        for tag in tags:
            self.assertIn(tag, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 0)
@tier2
def test_positive_resynchronize_rpm_repo(self):
"""Check that repository content is resynced after packages were
removed from repository
:id: a21b6710-4f12-4722-803e-3cb29d70eead
:expectedresults: Repository has updated non-zero packages count
:BZ: 1459845, 1459874, 1318004
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
@tier2
def test_positive_resynchronize_puppet_repo(self):
"""Check that repository content is resynced after puppet modules
were removed from repository
:id: 9e28f0ae-3875-4c1e-ad8b-d068f4409fe3
:expectedresults: Repository has updated non-zero puppet modules count
:BZ: 1459845, 1318004
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
        # Find puppet modules and remove them from the repository
modules = PuppetModule.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [module['id'] for module in modules]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '0')
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
@tier2
def test_positive_synchronize_rpm_repo_ignore_content(self):
"""Synchronize yum repository with ignore content setting
:id: fa32ff10-e2e2-4ee0-b444-82f66f4a0e96
:expectedresults: Selected content types are ignored during
synchronization
:BZ: 1591358
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository(
{
'content-type': 'yum',
'url': FAKE_YUM_MIXED_REPO,
'ignorable-content': ['erratum', 'srpm', 'drpm'],
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
# Check synced content types
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '5', 'content not synced correctly')
self.assertEqual(repo['content-counts']['errata'], '0', 'content not ignored correctly')
self.assertEqual(
repo['content-counts']['source-rpms'], '0', 'content not ignored correctly'
)
# drpm check requires a different method
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
# expecting No such file or directory for drpms
self.assertEqual(result.return_code, 1)
self.assertIn('No such file or directory', result.stderr)
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
# Update the ignorable-content setting
Repository.update({'id': repo['id'], 'ignorable-content': ['rpm']})
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
# Re-check synced content types
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '0', 'content not ignored correctly')
self.assertEqual(repo['content-counts']['errata'], '2', 'content not synced correctly')
if not is_open('BZ:1664549'):
self.assertEqual(
repo['content-counts']['source-rpms'], '3', 'content not synced correctly'
)
if not is_open('BZ:1682951'):
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 4, 'content not synced correctly')
@tier1
def test_positive_update_url(self):
"""Update the original url for a repository
:id: 1a2cf29b-5c30-4d4c-b6d1-2f227b0a0a57
:expectedresults: Repository url is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# generate repo URLs with all valid credentials
auth_repos = [
repo.format(creds['login'], creds['pass'])
for creds in valid_http_credentials(url_encoded=True)
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in [
FAKE_4_YUM_REPO,
FAKE_1_PUPPET_REPO,
FAKE_2_PUPPET_REPO,
FAKE_3_PUPPET_REPO,
FAKE_2_YUM_REPO,
] + auth_repos:
with self.subTest(url):
# Update the url
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], url)
@tier1
def test_negative_update_auth_url_with_special_characters(self):
"""Verify that repository URL credentials cannot be updated to contain
the forbidden characters
:id: 566553b2-d077-4fd8-8ed5-00ba75355386
:expectedresults: Repository url not updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# get auth repos with credentials containing unquoted special chars
auth_repos = [
repo.format(cred['login'], cred['pass'])
for cred in valid_http_credentials()
if cred['quote']
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in auth_repos:
with self.subTest(url):
with self.assertRaises(CLIReturnCodeError):
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], new_repo['url'])
@tier1
def test_negative_update_auth_url_too_long(self):
"""Update the original url for a repository to value which is too long
:id: a703de60-8631-4e31-a9d9-e51804f27f03
:expectedresults: Repository url not updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# generate repo URLs with all invalid credentials
auth_repos = [
repo.format(cred['login'], cred['pass'])
for cred in invalid_http_credentials()
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in auth_repos:
with self.subTest(url):
with self.assertRaises(CLIReturnCodeError):
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], new_repo['url'])
@tier1
def test_positive_update_gpg_key(self):
"""Update the original gpg key
:id: 367ff375-4f52-4a8c-b974-8c1c54e3fdd3
:expectedresults: Repository gpg key is updated
:CaseImportance: Critical
"""
gpg_key = make_gpg_key({'organization-id': self.org['id']})
gpg_key_new = make_gpg_key({'organization-id': self.org['id']})
new_repo = self._make_repository({'gpg-key-id': gpg_key['id']})
Repository.update({'id': new_repo['id'], 'gpg-key-id': gpg_key_new['id']})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['gpg-key']['id'], gpg_key_new['id'])
@tier1
def test_positive_update_mirror_on_sync(self):
"""Update the mirror on sync rule for repository
:id: 9bab2537-3223-40d7-bc4c-a51b09d2e812
:expectedresults: Repository is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository({'mirror-on-sync': 'no'})
Repository.update({'id': new_repo['id'], 'mirror-on-sync': 'yes'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['mirror-on-sync'], 'yes')
@tier1
def test_positive_update_publish_method(self):
"""Update the original publishing method
:id: e7bd2667-4851-4a64-9c70-1b5eafbc3f71
:expectedresults: Repository publishing method is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository({'publish-via-http': 'no'})
Repository.update({'id': new_repo['id'], 'publish-via-http': 'yes'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['publish-via-http'], 'yes')
@tier1
def test_positive_update_checksum_type(self):
"""Create a YUM repository and update the checksum type
:id: 42f14257-d860-443d-b337-36fd355014bc
:expectedresults: A YUM repository is updated and contains the correct
checksum type
:CaseImportance: Critical
"""
content_type = 'yum'
repository = self._make_repository(
{'content-type': content_type, 'download-policy': 'immediate'}
)
self.assertEqual(repository['content-type'], content_type)
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
Repository.update({'checksum-type': checksum_type, 'id': repository['id']})
result = Repository.info({'id': repository['id']})
self.assertEqual(result['checksum-type'], checksum_type)
@tier1
def test_negative_create_checksum_with_on_demand_policy(self):
"""Attempt to create repository with checksum and on_demand policy.
:id: 33d712e6-e91f-42bb-8c5d-35bdc427182c
:expectedresults: A repository is not created and error is raised.
:CaseImportance: Critical
:BZ: 1732056
"""
for checksum_type in 'sha1', 'sha256':
with self.assertRaises(CLIFactoryError):
self._make_repository(
{
'content-type': 'yum',
'checksum-type': checksum_type,
'download-policy': 'on_demand',
}
)
@tier1
def test_positive_delete_by_id(self):
"""Check if repository can be created and deleted
:id: bcf096db-0033-4138-90a3-cb7355d5dfaf
:expectedresults: Repository is created and then deleted
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
@upgrade
def test_positive_delete_by_name(self):
"""Check if repository can be created and deleted
:id: 463980a4-dbcf-4178-83a6-1863cf59909a
:expectedresults: Repository is created and then deleted
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
Repository.delete({'name': new_repo['name'], 'product-id': self.product['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
def test_positive_delete_rpm(self):
"""Check if rpm repository with packages can be deleted.
:id: 1172492f-d595-4c8e-89c1-fabb21eb04ac
:expectedresults: Repository is deleted.
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
# Check that there is at least one package
self.assertGreater(int(new_repo['content-counts']['packages']), 0)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
def test_positive_delete_puppet(self):
"""Check if puppet repository with puppet modules can be deleted.
:id: 83d92454-11b7-4f9a-952d-650ffe5135e4
:expectedresults: Repository is deleted.
:BZ: 1316681
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
# Check that there is at least one puppet module
self.assertGreater(int(new_repo['content-counts']['puppet-modules']), 0)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
@upgrade
def test_positive_remove_content_by_repo_name(self):
"""Synchronize repository and remove rpm content from using repo name
:id: a8b6f17d-3b13-4185-920a-2558ace59458
:expectedresults: Content Counts shows zero packages
:BZ: 1349646, 1413145, 1459845, 1459874
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
repo = Repository.info(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list(
{
'repository': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
Repository.remove_content(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
'ids': [package['id'] for package in packages],
}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
@tier1
@upgrade
def test_positive_remove_content_rpm(self):
"""Synchronize repository and remove rpm content from it
:id: c4bcda0e-c0d6-424c-840d-26684ca7c9f1
:expectedresults: Content Counts shows zero packages
:BZ: 1459845, 1459874
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
@tier1
@upgrade
def test_positive_remove_content_puppet(self):
"""Synchronize repository and remove puppet content from it
:id: b025ccd0-9beb-4ac0-9fbf-21340c90650e
:expectedresults: Content Counts shows zero puppet modules
:BZ: 1459845
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
# Find puppet modules and remove them from repository
modules = PuppetModule.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [module['id'] for module in modules]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '0')
@tier1
def test_positive_upload_content(self):
"""Create repository and upload content
:id: eb0ec599-2bf1-483a-8215-66652f948d67
:expectedresults: upload content is successful
:BZ: 1343006
:CaseImportance: Critical
"""
new_repo = self._make_repository({'name': gen_string('alpha')})
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
@tier1
def test_positive_upload_content_to_file_repo(self):
"""Create file repository and upload content to it
:id: 5e24b416-2928-4533-96cf-6bffbea97a95
:customerscenario: true
:expectedresults: upload content operation is successful
:BZ: 1446975
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT)
ssh.upload_file(
local_file=get_data_file(OS_TEMPLATE_DATA_FILE),
remote_file="/tmp/{0}".format(OS_TEMPLATE_DATA_FILE),
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(OS_TEMPLATE_DATA_FILE),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(OS_TEMPLATE_DATA_FILE), result[0]['message']
)
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(
int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT + 1
)
@pytest.mark.skip_if_open("BZ:1410916")
@tier2
def test_negative_restricted_user_cv_add_repository(self):
"""Attempt to add a product repository to content view with a
restricted user, using product name not visible to restricted user.
:id: 65792ae0-c5be-4a6c-9062-27dc03b83e10
:BZ: 1436209,1410916
:Steps:
1. Setup a restricted user with permissions that filter the
products with names like Test_* or "rhel7*"
2. Create a content view
            3. Create a product with a name that should not be visible to the
               user and add a repository to it
:expectedresults:
1. The admin user can view the product repository
2. The restricted user cannot view the product repository
3. The restricted user cannot add the product repository to a
content view
4. After the attempt of adding the product repository to content
view, assert that the restricted user still cannot view the
product repository.
:CaseLevel: Integration
"""
required_permissions = {
'Katello::Product': (
[
'view_products',
'create_products',
'edit_products',
'destroy_products',
'sync_products',
'export_products',
],
'name ~ "Test_*" || name ~ "rhel7*"',
),
'Katello::ContentView': (
[
'view_content_views',
'create_content_views',
'edit_content_views',
'destroy_content_views',
'publish_content_views',
'promote_or_remove_content_views',
'export_content_views',
],
'name ~ "Test_*" || name ~ "rhel7*"',
),
'Organization': (
[
'view_organizations',
'create_organizations',
'edit_organizations',
'destroy_organizations',
'assign_organizations',
],
None,
),
}
user_name = gen_alphanumeric()
user_password = gen_alphanumeric()
# Generate a product name that is not like Test_* or rhel7*
product_name = 'zoo_{0}'.format(gen_string('alpha', 20))
# Generate a content view name like Test_*
content_view_name = 'Test_{0}'.format(gen_string('alpha', 20))
# Create an organization
org = make_org()
# Create a non admin user, for the moment without any permissions
user = make_user(
{
'admin': False,
'default-organization-id': org['id'],
'organization-ids': [org['id']],
'login': user_name,
'password': user_password,
}
)
# Create a new role
role = make_role()
# Get the available permissions
available_permissions = Filter.available_permissions()
# group the available permissions by resource type
available_rc_permissions = {}
for permission in available_permissions:
permission_resource = permission['resource']
if permission_resource not in available_rc_permissions:
available_rc_permissions[permission_resource] = []
available_rc_permissions[permission_resource].append(permission)
# create only the required role permissions per resource type
for resource_type, permission_data in required_permissions.items():
permission_names, search = permission_data
# assert that the required resource type is available
self.assertIn(resource_type, available_rc_permissions)
available_permission_names = [
permission['name']
for permission in available_rc_permissions[resource_type]
if permission['name'] in permission_names
]
# assert that all the required permissions are available
self.assertEqual(set(permission_names), set(available_permission_names))
# Create the current resource type role permissions
make_filter({'role-id': role['id'], 'permissions': permission_names, 'search': search})
# Add the created and initiated role with permissions to user
User.add_role({'id': user['id'], 'role-id': role['id']})
# assert that the user is not an admin one and cannot read the current
# role info (note: view_roles is not in the required permissions)
with self.assertRaises(CLIReturnCodeError) as context:
Role.with_user(user_name, user_password).info({'id': role['id']})
self.assertIn(
'Forbidden - server refused to process the request', context.exception.stderr
)
# Create a product
product = make_product({'organization-id': org['id'], 'name': product_name})
# Create a yum repository and synchronize
repo = make_repository({'product-id': product['id'], 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
# Create a content view
content_view = make_content_view({'organization-id': org['id'], 'name': content_view_name})
# assert that the user can read the content view info as per required
# permissions
user_content_view = ContentView.with_user(user_name, user_password).info(
{'id': content_view['id']}
)
# assert that this is the same content view
self.assertEqual(content_view['name'], user_content_view['name'])
# assert admin user is able to view the product
repos = Repository.list({'organization-id': org['id']})
self.assertEqual(len(repos), 1)
# assert that this is the same repo
self.assertEqual(repos[0]['id'], repo['id'])
# assert that restricted user is not able to view the product
repos = Repository.with_user(user_name, user_password).list({'organization-id': org['id']})
self.assertEqual(len(repos), 0)
# assert that the user cannot add the product repo to content view
with self.assertRaises(CLIReturnCodeError):
ContentView.with_user(user_name, user_password).add_repository(
{
'id': content_view['id'],
'organization-id': org['id'],
'repository-id': repo['id'],
}
)
# assert that restricted user still not able to view the product
repos = Repository.with_user(user_name, user_password).list({'organization-id': org['id']})
self.assertEqual(len(repos), 0)
@tier2
def test_positive_upload_remove_srpm_content(self):
"""Create repository, upload and remove an SRPM content
:id: 706dc3e2-dacb-4fdd-8eef-5715ce498888
:expectedresults: SRPM successfully uploaded and removed
:CaseImportance: Critical
:BZ: 1378442
"""
new_repo = self._make_repository({'name': gen_string('alpha', 15)})
ssh.upload_file(
local_file=get_data_file(SRPM_TO_UPLOAD), remote_file="/tmp/{0}".format(SRPM_TO_UPLOAD)
)
# Upload SRPM
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(SRPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
'content-type': 'srpm',
}
)
assert "Successfully uploaded file '{0}'".format(SRPM_TO_UPLOAD) in result[0]['message']
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 1
# Remove uploaded SRPM
Repository.remove_content(
{
'id': new_repo['id'],
'ids': [Srpm.list({'repository-id': new_repo['id']})[0]['id']],
'content-type': 'srpm',
}
)
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 0
@upgrade
@tier2
def test_positive_srpm_list_end_to_end(self):
"""Create repository, upload, list and remove an SRPM content
:id: 98ad4228-f2e5-438a-9210-5ce6561769f2
:expectedresults:
1. SRPM should be listed repository wise.
2. SRPM should be listed product wise.
3. SRPM should be listed for specific and all Organizations.
4. SRPM should be listed LCE wise.
5. Able to see info of uploaded SRPM.
:CaseImportance: High
"""
new_repo = self._make_repository({'name': gen_string('alpha', 15)})
ssh.upload_file(
local_file=get_data_file(SRPM_TO_UPLOAD), remote_file="/tmp/{0}".format(SRPM_TO_UPLOAD)
)
# Upload SRPM
Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(SRPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
'content-type': 'srpm',
}
)
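        # The uploaded SRPM should now be visible at every listing scope:
        # globally, per repository, per product, per organization and per LCE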
assert len(Srpm.list()) > 0
srpm_list = Srpm.list({'repository-id': new_repo['id']})
assert srpm_list[0]['filename'] == SRPM_TO_UPLOAD
assert len(srpm_list) == 1
assert Srpm.info({'id': srpm_list[0]['id']})[0]['filename'] == SRPM_TO_UPLOAD
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 1
assert (
len(
Srpm.list(
{
'organization': new_repo['organization'],
'product-id': new_repo['product']['id'],
'repository-id': new_repo['id'],
}
)
)
> 0
)
assert len(Srpm.list({'organization': new_repo['organization']})) > 0
assert (
len(
Srpm.list(
{'organization': new_repo['organization'], 'lifecycle-environment': 'Library'}
)
)
> 0
)
assert (
len(
Srpm.list(
{
'content-view': 'Default Organization View',
'lifecycle-environment': 'Library',
'organization': new_repo['organization'],
}
)
)
> 0
)
# Remove uploaded SRPM
Repository.remove_content(
{
'id': new_repo['id'],
'ids': [Srpm.list({'repository-id': new_repo['id']})[0]['id']],
'content-type': 'srpm',
}
)
assert int(
Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']
) == len(Srpm.list({'repository-id': new_repo['id']}))
@tier1
def test_positive_create_get_update_delete_module_streams(self):
"""Check module-stream get for each create, get, update, delete.
        :id: e9001f76-9bc7-42a7-b8c9-2dccd5bf0b1f
:Setup:
1. valid yum repo with Module Streams.
:Steps:
            1. Create a Yum repository with a url containing module-streams
            2. Initialize synchronization
            3. Create another repository with the same url
4. Module-Stream Get
5. Update the Module-Stream
6. Module-Stream Get
7. Delete Module-Stream
8. Module-Stream Get
:expectedresults: yum repository with modules is synced,
shows correct count and details with create, update, delete and
even duplicate repositories.
:CaseAutomation: automated
:CaseImportance: Critical
"""
org = make_org()
# Create a product
product = make_product({'organization-id': org['id']})
repo = make_repository(
{
'product-id': product['id'],
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(
repo['content-counts']['module-streams'], '7', 'Module Streams not synced correctly'
)
# adding repo with same yum url should not change count.
duplicate_repo = make_repository(
{
'product-id': product['id'],
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
}
)
Repository.synchronize({'id': duplicate_repo['id']})
module_streams = ModuleStream.list({'organization-id': org['id']})
        self.assertEqual(len(module_streams), 7, 'Module Streams not listed correctly')
Repository.update(
{'product-id': product['id'], 'id': repo['id'], 'url': CUSTOM_MODULE_STREAM_REPO_2}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(
repo['content-counts']['module-streams'], '7', 'Module Streams not synced correctly'
)
Repository.delete({'id': repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': repo['id']})
@tier1
def test_module_stream_list_validation(self):
"""Check module-stream get with list on hammer.
        :id: 9842a0c3-8532-4b16-a00a-534fc3b0a776
:Setup:
1. valid yum repo with Module Streams.
:Steps:
            1. Create Yum repositories, with urls containing module-streams, under separate products
2. Initialize synchronization
3. Verify the module-stream list with various inputs options
:expectedresults: Verify the module-stream list response.
:CaseAutomation: automated
"""
repo1 = self._make_repository({'content-type': 'yum', 'url': CUSTOM_MODULE_STREAM_REPO_1})
Repository.synchronize({'id': repo1['id']})
product2 = make_product_wait({'organization-id': self.org['id']})
repo2 = self._make_repository(
{
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
'product-id': product2['id'],
}
)
Repository.synchronize({'id': repo2['id']})
module_streams = ModuleStream.list()
        self.assertGreater(len(module_streams), 13, 'Module Streams not listed correctly')
        module_streams = ModuleStream.list({'product-id': product2['id']})
        self.assertEqual(len(module_streams), 7, 'Module Streams not listed correctly')
@tier1
def test_module_stream_info_validation(self):
"""Check module-stream get with info on hammer.
        :id: ddbeb49e-d292-4dc4-8fb9-e9b768acc441
:Setup:
1. valid yum repo with Module Streams.
:Steps:
            1. Create Yum repositories with urls containing module-streams
2. Initialize synchronization
3. Verify the module-stream info with various inputs options
:expectedresults: Verify the module-stream info response.
:CaseAutomation: automated
"""
product2 = make_product_wait({'organization-id': self.org['id']})
repo2 = self._make_repository(
{
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
'product-id': product2['id'],
}
)
Repository.synchronize({'id': repo2['id']})
module_streams = ModuleStream.list(
{'repository-id': repo2['id'], 'search': 'name="walrus" and stream="5.21"'}
)
actual_result = ModuleStream.info({'id': module_streams[0]['id']})
expected_result = {
'module-stream-name': 'walrus',
'stream': '5.21',
'architecture': 'x86_64',
}
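        # compare only the fields listed in expected_result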
self.assertEqual(
expected_result,
{key: value for key, value in actual_result.items() if key in expected_result},
)
class OstreeRepositoryTestCase(CLITestCase):
"""Ostree Repository CLI tests."""
@classmethod
@skip_if_os('RHEL6')
def setUpClass(cls):
"""Create an organization and product which can be re-used in tests."""
super(OstreeRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
def _make_repository(self, options=None):
"""Makes a new repository and asserts its success"""
if options is None:
options = {}
if options.get('product-id') is None:
options['product-id'] = self.product['id']
return make_repository(options)
@tier1
def test_positive_create_ostree_repo(self):
"""Create a ostree repository
:id: a93c52e1-b32e-4590-981b-636ae8b8314d
:expectedresults: ostree repository is created
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository(
{
'name': name,
'content-type': 'ostree',
'publish-via-http': 'false',
'url': FEDORA27_OSTREE_REPO,
}
)
self.assertEqual(new_repo['name'], name)
self.assertEqual(new_repo['content-type'], 'ostree')
@pytest.mark.skip_if_open("BZ:1716429")
@tier1
def test_negative_create_ostree_repo_with_checksum(self):
"""Create a ostree repository with checksum type
:id: a334e0f7-e1be-4add-bbf2-2fd9f0b982c4
:expectedresults: Validation error is raised
:CaseImportance: Critical
:BZ: 1716429
"""
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
with self.assertRaisesRegex(
CLIFactoryError,
'Validation failed: Checksum type cannot be set for non-yum repositories',
):
self._make_repository(
{
'content-type': 'ostree',
'checksum-type': checksum_type,
'publish-via-http': 'false',
'url': FEDORA27_OSTREE_REPO,
}
)
@tier1
def test_negative_create_unprotected_ostree_repo(self):
"""Create a ostree repository and published via http
:id: 2b139560-65bb-4a40-9724-5cca57bd8d30
:expectedresults: ostree repository is not created
:CaseImportance: Critical
"""
for use_http in 'true', 'yes', '1':
with self.subTest(use_http):
with self.assertRaisesRegex(
CLIFactoryError,
'Validation failed: OSTree Repositories cannot be unprotected',
):
self._make_repository(
{
'content-type': 'ostree',
'publish-via-http': 'true',
'url': FEDORA27_OSTREE_REPO,
}
)
@tier2
@upgrade
@pytest.mark.skip_if_open("BZ:1625783")
def test_positive_synchronize_ostree_repo(self):
"""Synchronize ostree repo
:id: 64fcae0a-44ae-46ae-9938-032bba1331e9
:expectedresults: Ostree repository is created and synced
:CaseLevel: Integration
:BZ: 1625783
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier1
def test_positive_delete_ostree_by_name(self):
"""Delete Ostree repository by name
:id: 0b545c22-acff-47b6-92ff-669b348f9fa6
:expectedresults: Repository is deleted by name
:CaseImportance: Critical
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
Repository.delete(
{
'name': new_repo['name'],
'product': new_repo['product']['name'],
'organization': new_repo['organization'],
}
)
with self.assertRaises(CLIReturnCodeError):
Repository.info({'name': new_repo['name']})
@tier1
@upgrade
def test_positive_delete_ostree_by_id(self):
"""Delete Ostree repository by id
:id: 171917f5-1a1b-440f-90c7-b8418f1da132
:expectedresults: Repository is deleted by id
:CaseImportance: Critical
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
class SRPMRepositoryTestCase(CLITestCase):
"""Tests specific to using repositories containing source RPMs."""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(SRPMRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier2
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync(self):
"""Synchronize repository with SRPMs
:id: eb69f840-122d-4180-b869-1bd37518480c
:expectedresults: srpms can be listed in repository
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/Packages/t/ | grep .src.rpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync_publish_cv(self):
"""Synchronize repository with SRPMs, add repository to content view
and publish content view
:id: 78cd6345-9c6c-490a-a44d-2ad64b7e959b
:expectedresults: srpms can be listed in content view
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/content_views/{}'
'/1.0/custom/{}/{}/Packages/t/ | grep .src.rpm'.format(
self.org['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@upgrade
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync_publish_promote_cv(self):
"""Synchronize repository with SRPMs, add repository to content view,
publish and promote content view to lifecycle environment
:id: 3d197118-b1fa-456f-980e-ad1a517bc769
:expectedresults: srpms can be listed in content view in proper
lifecycle environment
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
content_view = ContentView.info({'id': cv['id']})
cvv = content_view['versions'][0]
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/Packages/t'
' | grep .src.rpm'.format(
self.org['label'], lce['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@pytest.mark.skip_if_open("BZ:1682951")
class DRPMRepositoryTestCase(CLITestCase):
"""Tests specific to using repositories containing delta RPMs."""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(DRPMRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier2
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync(self):
"""Synchronize repository with DRPMs
:id: a645966c-750b-40ef-a264-dc3bb632b9fd
:expectedresults: drpms can be listed in repository
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync_publish_cv(self):
"""Synchronize repository with DRPMs, add repository to content view
and publish content view
:id: 014bfc80-4622-422e-a0ec-755b1d9f845e
:expectedresults: drpms can be listed in content view
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/content_views/{}'
'/1.0/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@upgrade
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync_publish_promote_cv(self):
"""Synchronize repository with DRPMs, add repository to content view,
publish and promote content view to lifecycle environment
:id: a01cb12b-d388-4902-8532-714f4e28ec56
:expectedresults: drpms can be listed in content view in proper
lifecycle environment
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
content_view = ContentView.info({'id': cv['id']})
cvv = content_view['versions'][0]
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}'
'/drpms/ | grep .drpm'.format(
self.org['label'], lce['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
class GitPuppetMirrorTestCase(CLITestCase):
"""Tests for creating the hosts via CLI."""
# Notes for Puppet GIT puppet mirror content
#
# This feature does not allow us to actually sync/update content in a
# GIT repo.
    # Instead, we're essentially "snapshotting" what a repo contains at any
    # given time. The ability to update the GIT puppet mirror is/should be
    # provided by pulp itself, via script. However, we should be able to
    # create a sync schedule against the mirror to make sure it is periodically
    # updated to contain the latest and greatest.
@pytest.mark.stubbed
@tier2
def test_positive_git_local_create(self):
"""Create repository with local git puppet mirror.
:id: 89211cd5-82b8-4391-b729-a7502e57f824
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Create link to local puppet mirror via cli
:expectedresults: Content source containing local GIT puppet mirror
content is created
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_local_update(self):
"""Update repository with local git puppet mirror.
:id: 341f40f2-3501-4754-9acf-7cda1a61f7db
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Modify details for existing puppet repo (name, etc.) via cli
:expectedresults: Content source containing local GIT puppet mirror
content is modified
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_local_delete(self):
"""Delete repository with local git puppet mirror.
:id: a243f5bb-5186-41b3-8e8a-07d5cc784ccd
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Delete link to local puppet mirror via cli
:expectedresults: Content source containing local GIT puppet mirror
content no longer exists/is available.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_remote_create(self):
"""Create repository with remote git puppet mirror.
:id: 8582529f-3112-4b49-8d8f-f2bbf7dceca7
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: Create link to local puppet mirror via cli
:expectedresults: Content source containing remote GIT puppet mirror
content is created
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_remote_update(self):
"""Update repository with remote git puppet mirror.
:id: 582c50b3-3b90-4244-b694-97642b1b13a9
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: modify details for existing puppet repo (name, etc.) via cli
:expectedresults: Content source containing remote GIT puppet mirror
content is modified
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_remote_delete(self):
"""Delete repository with remote git puppet mirror.
:id: 0a23f969-b202-4c6c-b12e-f651a0b7d049
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: Delete link to remote puppet mirror via cli
:expectedresults: Content source containing remote GIT puppet mirror
content no longer exists/is available.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_sync(self):
"""Sync repository with git puppet mirror.
:id: a46c16bd-0986-48db-8e62-aeb3907ba4d2
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to sync content from mirror via cli
:expectedresults: Content is pulled down without error
:expectedresults: Confirmation that various resources actually exist in
local content repo
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_sync_with_content_change(self):
"""Sync repository with changes in git puppet mirror.
        If a module changes in the GIT mirror but its version in the manifest
        does not change, the content is still pulled.
:id: 7d9519ca-8660-4014-8e0e-836594891c0c
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps:
1. Sync a git repo and observe the contents/checksum etc. of an
existing puppet module
2. Assure a puppet module in git repo has changed but the manifest
version for this module does not change.
3. Using pulp script, update repo mirror and re-sync within
satellite
4. View contents/details of same puppet module
:expectedresults: Puppet module has been updated in our content, even
though the module's version number has not changed.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_sync_schedule(self):
"""Scheduled sync of git puppet mirror.
:id: 0d58d180-9836-4524-b608-66b67f9cab12
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to create a scheduled sync content from mirror, via cli
:expectedresults: Content is pulled down without error on expected
schedule
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_view_content(self):
"""View content in synced git puppet mirror
:id: 02f06092-dd6c-49fa-be9f-831e52476e41
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to list contents of repo via cli
:expectedresults: Spot-checked items (filenames, dates, perhaps
checksums?) are correct.
:CaseAutomation: notautomated
"""
class FileRepositoryTestCase(CLITestCase):
"""Specific tests for File Repositories"""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(FileRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier1
def test_positive_upload_file_to_file_repo(self):
"""Check arbitrary file can be uploaded to File Repository
:id: 134d668d-bd63-4475-bf7b-b899bb9fb7bb
:Steps:
1. Create a File Repository
2. Upload an arbitrary file to it
        :expectedresults: uploaded file is available under File Repository
:CaseAutomation: Automated
:CaseImportance: Critical
"""
new_repo = make_repository(
{'content-type': 'file', 'product-id': self.product['id'], 'url': CUSTOM_FILE_REPO}
)
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
repo = Repository.info({'id': new_repo['id']})
self.assertEqual(repo['content-counts']['files'], '1')
filesearch = entities.File().search(
query={"search": "name={0} and repository={1}".format(RPM_TO_UPLOAD, new_repo['name'])}
)
self.assertEqual(RPM_TO_UPLOAD, filesearch[0].name)
@pytest.mark.stubbed
@tier1
def test_positive_file_permissions(self):
"""Check file permissions after file upload to File Repository
:id: 03da888a-69ba-492f-b204-c62d85948d8a
:Setup:
1. Create a File Repository
2. Upload an arbitrary file to it
:Steps: Retrieve file permissions from File Repository
:expectedresults: uploaded file permissions are kept after upload
:CaseAutomation: notautomated
:CaseImportance: Critical
"""
@tier1
@upgrade
def test_positive_remove_file(self):
"""Check arbitrary file can be removed from File Repository
:id: 07ca9c8d-e764-404e-866d-30d8cd2ca2b6
:Setup:
1. Create a File Repository
2. Upload an arbitrary file to it
:Steps: Remove a file from File Repository
:expectedresults: file is not listed under File Repository after
removal
:CaseImportance: Critical
"""
new_repo = make_repository(
{'content-type': 'file', 'product-id': self.product['id'], 'url': CUSTOM_FILE_REPO}
)
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
repo = Repository.info({'id': new_repo['id']})
self.assertGreater(int(repo['content-counts']['files']), 0)
files = File.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [file['id'] for file in files]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['files'], '0')
@tier2
@upgrade
def test_positive_remote_directory_sync(self):
"""Check an entire remote directory can be synced to File Repository
through http
:id: 5c246307-8597-4f68-a6aa-4f1a6bbf0939
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
2. Make the directory available through http
:Steps:
1. Create a File Repository with url pointing to http url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced over http
"""
repo = make_repository(
{
'product-id': self.product['id'],
'content-type': 'file',
'url': FAKE_PULP_REMOTE_FILEREPO,
'name': gen_string('alpha'),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['files'], '2')
@tier1
def test_positive_file_repo_local_directory_sync(self):
"""Check an entire local directory can be synced to File Repository
:id: ee91ecd2-2f07-4678-b782-95a7e7e57159
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
locally (on the Satellite/Foreman host)
:Steps:
1. Create a File Repository with url pointing to local url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced
:CaseImportance: Critical
"""
        # Set up a local directory containing a pulp manifest to sync from
ssh.command("mkdir -p {}".format(CUSTOM_LOCAL_FOLDER))
ssh.command(
'wget -P {0} -r -np -nH --cut-dirs=5 -R "index.html*" '
'{1}'.format(CUSTOM_LOCAL_FOLDER, CUSTOM_FILE_REPO)
)
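        # wget mirrors the remote file repo (including its PULP_MANIFEST) into
        # CUSTOM_LOCAL_FOLDER so it can be synced via a file:// url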
repo = make_repository(
{
'content-type': 'file',
'product-id': self.product['id'],
'url': 'file://{0}'.format(CUSTOM_LOCAL_FOLDER),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
        self.assertGreater(int(repo['content-counts']['files']), 1)
@tier2
def test_positive_symlinks_sync(self):
"""Check symlinks can be synced to File Repository
:id: b0b0a725-b754-450b-bc0d-572d0294307a
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
locally (on the Satellite/Foreman host)
2. Make sure it contains symlinks
:Steps:
1. Create a File Repository with url pointing to local url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced, including files
referred by symlinks
:CaseAutomation: automated
"""
# Downloading the pulp repository into Satellite Host
ssh.command("mkdir -p {}".format(CUSTOM_LOCAL_FOLDER))
ssh.command(
'wget -P {0} -r -np -nH --cut-dirs=5 -R "index.html*" '
'{1}'.format(CUSTOM_LOCAL_FOLDER, CUSTOM_FILE_REPO)
)
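        # create a symlink pointing at the downloaded content before syncing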
ssh.command("ln -s {0} /{1}".format(CUSTOM_LOCAL_FOLDER, gen_string('alpha')))
repo = make_repository(
{
'content-type': 'file',
'product-id': self.product['id'],
'url': 'file://{0}'.format(CUSTOM_LOCAL_FOLDER),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
        self.assertGreater(int(repo['content-counts']['files']), 1)
| gpl-3.0 | 4,146,998,596,047,780,000 | 36.402783 | 99 | 0.58873 | false |
matteocrippa/dsl-n55u-bender | release/src/router/openvpn/win/build_ddk.py | 11 | 1559 | import os
from wb import system, home_fn, choose_arch
def build_ddk(config, dir, x64):
ddk_path = config['DDK_PATH']
ddk_major = int(config['DDKVER_MAJOR'])
debug = 'PRODUCT_TAP_DEBUG' in config
return build_tap(ddk_path, ddk_major, debug, dir, x64)
def build_tap(ddk_path, ddk_major, debug, dir, x64):
"""Build drivers using WinDDK tools"""
setenv_bat = os.path.realpath(os.path.join(ddk_path, 'bin/setenv.bat'))
target = 'chk' if debug else 'fre'
if x64:
target += ' x64'
else:
target += ' x86'
if ddk_major >= 7600:
if x64:
target += ' wlh' # vista
else:
target += ' wnet' # server 2003
else:
if x64:
target += ' wnet' # server 2003
else:
target += ' w2k' # 2000
system('cmd /c "%s %s %s && cd %s && build -cef"' % (
setenv_bat,
os.path.realpath(ddk_path),
target,
dir
))
def main(config, proj, arch):
if proj == 'tap':
dir = home_fn('tap-win32')
elif proj == 'tapinstall':
dir = home_fn('tapinstall')
else:
raise ValueError("unknown project: %s" % (proj,))
for x64 in choose_arch(arch):
build_ddk(config, dir, x64)
# if we are run directly, and not loaded as a module
if __name__ == "__main__":
import sys
from wb import config
if len(sys.argv) >= 3:
main(config, sys.argv[1], sys.argv[2])
else:
print "usage: build <tap|tapinstall> <x64|x86|all>"
sys.exit(2)
| gpl-2.0 | -6,540,174,947,459,720,000 | 27.345455 | 75 | 0.54009 | false |
gnieboer/gnuradio | gr-wxgui/python/wxgui/const_window.py | 58 | 6131 | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 200
LOOP_BW_MIN_EXP, LOOP_BW_MAX_EXP = -6, 0.0
GAIN_MU_MIN_EXP, GAIN_MU_MAX_EXP = -6, -0.301
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'const_rate', 5)
DEFAULT_WIN_SIZE = (500, 400)
DEFAULT_CONST_SIZE = gr.prefs().get_long('wxgui', 'const_size', 2048)
CONST_PLOT_COLOR_SPEC = (0, 0, 1)
MARKER_TYPES = (
('Dot Small', 1.0),
('Dot Medium', 2.0),
('Dot Large', 3.0),
('Line Link', None),
)
DEFAULT_MARKER_TYPE = 2.0
##################################################
# Constellation window control panel
##################################################
class control_panel(wx.Panel):
"""
	A control panel with wx widgets to control the plotter.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#loop_bw
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Loop Bandwidth',
converter=forms.float_converter(),
ps=parent, key=LOOP_BW_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=LOOP_BW_MIN_EXP,
max_exp=LOOP_BW_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=LOOP_BW_KEY,
)
#gain_mu
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Gain Mu',
converter=forms.float_converter(),
ps=parent, key=GAIN_MU_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=GAIN_MU_MIN_EXP,
max_exp=GAIN_MU_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=GAIN_MU_KEY,
)
#marker
control_box.AddStretchSpacer()
forms.drop_down(
sizer=control_box, parent=self,
ps=parent, key=MARKER_KEY, label='Marker',
choices=map(lambda x: x[1], MARKER_TYPES),
labels=map(lambda x: x[0], MARKER_TYPES),
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Constellation window with plotter and control panel
##################################################
class const_window(wx.Panel, pubsub.pubsub):
def __init__(
self,
parent,
controller,
size,
title,
msg_key,
loop_bw_key,
gain_mu_key,
gain_omega_key,
omega_key,
sample_rate_key,
):
pubsub.pubsub.__init__(self)
#proxy the keys
self.proxy(MSG_KEY, controller, msg_key)
self.proxy(LOOP_BW_KEY, controller, loop_bw_key)
self.proxy(GAIN_MU_KEY, controller, gain_mu_key)
self.proxy(GAIN_OMEGA_KEY, controller, gain_omega_key)
self.proxy(OMEGA_KEY, controller, omega_key)
self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
#initialize values
self[RUNNING_KEY] = True
self[X_DIVS_KEY] = 8
self[Y_DIVS_KEY] = 8
self[MARKER_KEY] = DEFAULT_MARKER_TYPE
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.channel_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.set_x_label('Inphase')
self.plotter.set_y_label('Quadrature')
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(True)
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
#alpha and gain mu 2nd orders
def set_gain_omega(gain_mu): self[GAIN_OMEGA_KEY] = .25*gain_mu**2
self.subscribe(GAIN_MU_KEY, set_gain_omega)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
self.subscribe(X_DIVS_KEY, self.update_grid)
self.subscribe(Y_DIVS_KEY, self.update_grid)
#initial update
self.update_grid()
def handle_msg(self, msg):
"""
Plot the samples onto the complex grid.
Args:
msg: the array of complex samples
"""
if not self[RUNNING_KEY]: return
#convert to complex floating point numbers
samples = numpy.fromstring(msg, numpy.complex64)
real = numpy.real(samples)
imag = numpy.imag(samples)
#plot
self.plotter.set_waveform(
channel=0,
samples=(real, imag),
color_spec=CONST_PLOT_COLOR_SPEC,
marker=self[MARKER_KEY],
)
#update the plotter
self.plotter.update()
def update_grid(self):
#update the x axis
x_max = 2.0
self.plotter.set_x_grid(-x_max, x_max, common.get_clean_num(2.0*x_max/self[X_DIVS_KEY]))
#update the y axis
y_max = 2.0
self.plotter.set_y_grid(-y_max, y_max, common.get_clean_num(2.0*y_max/self[Y_DIVS_KEY]))
#update plotter
self.plotter.update()
| gpl-3.0 | -8,390,796,735,511,075,000 | 28.056872 | 90 | 0.650302 | false |
Angoreher/xcero | stats/models.py | 1 | 1039 | # -*- coding: utf-8 -*-
""" Models for the stats application. """
# standard library
# django
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
# models
from base.models import BaseModel
from users.models import User
class Stat(BaseModel):
# foreign keys
user = models.ForeignKey(
User,
verbose_name=_('user'),
)
# required fields
name = models.CharField(
_('name'),
max_length=30,
blank=True,
)
# optional fields
class Meta:
verbose_name = _('stat')
verbose_name_plural = _('stats')
permissions = (
('view_stat', _('Can view stats')),
)
def __str__(self):
# TODO this is an example str return, change it
return self.name
def get_absolute_url(self):
""" Returns the canonical URL for the stat object """
# TODO this is an example, change it
return reverse('stat_detail', args=(self.pk,))
| mit | 4,308,593,348,742,390,000 | 23.162791 | 61 | 0.599615 | false |
jessstrap/servotk | tests/wpt/web-platform-tests/tools/wptserve/wptserve/ranges.py | 142 | 3004 | from .utils import HTTPException
class RangeParser(object):
def __call__(self, header, file_size):
prefix = "bytes="
if not header.startswith(prefix):
raise HTTPException(416, message="Unrecognised range type %s" % (header,))
parts = header[len(prefix):].split(",")
ranges = []
for item in parts:
components = item.split("-")
if len(components) != 2:
raise HTTPException(416, "Bad range specifier %s" % (item))
data = []
for component in components:
if component == "":
data.append(None)
else:
try:
data.append(int(component))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
try:
ranges.append(Range(data[0], data[1], file_size))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
return self.coalesce_ranges(ranges, file_size)
def coalesce_ranges(self, ranges, file_size):
rv = []
target = None
for current in reversed(sorted(ranges)):
if target is None:
target = current
else:
new = target.coalesce(current)
target = new[0]
if len(new) > 1:
rv.append(new[1])
rv.append(target)
return rv[::-1]
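# Example (an illustrative sketch): for a 1200 byte resource,
#   RangeParser()("bytes=0-499,1000-", 1200)
# returns two coalesced Range objects whose header_value() methods render as
# "bytes 0-499/1200" and "bytes 1000-1199/1200".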
class Range(object):
def __init__(self, lower, upper, file_size):
self.file_size = file_size
self.lower, self.upper = self._abs(lower, upper)
if self.lower >= self.upper or self.lower >= self.file_size:
raise ValueError
def __repr__(self):
return "<Range %s-%s>" % (self.lower, self.upper)
def __lt__(self, other):
return self.lower < other.lower
def __gt__(self, other):
return self.lower > other.lower
def __eq__(self, other):
return self.lower == other.lower and self.upper == other.upper
def _abs(self, lower, upper):
if lower is None and upper is None:
lower, upper = 0, self.file_size
elif lower is None:
lower, upper = max(0, self.file_size - upper), self.file_size
elif upper is None:
lower, upper = lower, self.file_size
else:
lower, upper = lower, min(self.file_size, upper + 1)
return lower, upper
def coalesce(self, other):
assert self.file_size == other.file_size
if (self.upper < other.lower or self.lower > other.upper):
return sorted([self, other])
else:
return [Range(min(self.lower, other.lower),
max(self.upper, other.upper) - 1,
self.file_size)]
def header_value(self):
return "bytes %i-%i/%i" % (self.lower, self.upper - 1, self.file_size)
| mpl-2.0 | -8,742,856,931,317,612,000 | 32.377778 | 86 | 0.522636 | false |
rghe/ansible | lib/ansible/modules/packaging/os/opkg.py | 95 | 5138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Pelletier <[email protected]>
# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: opkg
author: "Patrick Pelletier (@skinp)"
short_description: Package manager for OpenWrt
description:
- Manages OpenWrt packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
default: present
force:
description:
- opkg --force parameter used
choices:
- ""
- "depends"
- "maintainer"
- "reinstall"
- "overwrite"
- "downgrade"
- "space"
- "postinstall"
- "remove"
- "checksum"
- "removal-of-dependent-packages"
default: absent
version_added: "2.0"
update_cache:
description:
- update the package db first
default: "no"
type: bool
requirements:
- opkg
- python
'''
EXAMPLES = '''
- opkg:
name: foo
state: present
- opkg:
name: foo
state: present
update_cache: yes
- opkg:
name: foo
state: absent
- opkg:
name: foo,bar
state: absent
- opkg:
name: foo
state: present
force: overwrite
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def update_package_db(module, opkg_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s update" % opkg_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, opkg_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_packages(module, opkg_path, packages):
""" Uninstalls one or more packages if installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
if query_package(module, opkg_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, opkg_path, packages):
""" Installs one or more packages if not already installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
install_c = 0
for package in packages:
if query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
if not query_package(module, opkg_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["pkg"], required=True),
state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
"checksum", "removal-of-dependent-packages"]),
update_cache=dict(default="no", aliases=["update-cache"], type='bool')
)
)
opkg_path = module.get_bin_path('opkg', True, ['/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, opkg_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, opkg_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, opkg_path, pkgs)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,605,373,370,097,007,900 | 25.348718 | 150 | 0.579798 | false |
greut/invenio-kwalitee | kwalitee/views.py | 3 | 12240 | # -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014, 2015 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Views like in MTV."""
from __future__ import unicode_literals
import requests
from flask import (current_app, render_template, make_response, json, jsonify,
request, url_for)
from werkzeug.exceptions import NotFound
from .tasks import pull_request, push, get_headers
from .models import db, Account, BranchStatus, CommitStatus, Repository
def status(sha):
"""Show the status of a commit.
**deprecated** static files aren't used anymore. To be removed at some
point.
:param sha: identifier of a commit.
"""
try:
with current_app.open_instance_resource(
"status_{sha}.txt".format(sha=sha), "r") as f:
status = f.read()
except IOError:
raise NotFound("{sha} was not found.".format(sha=sha))
status = status if len(status) > 0 else sha + ": Everything OK"
return render_template("status.html", status=status)
def index():
"""Homepage that lists the accounts."""
accounts = Account.query.order_by(db.asc(Account.name)).all()
return render_template("index.html", accounts=accounts)
def account(account):
"""Display the repositories linked with one account.
:param account: name of the account
"""
acc = _get_account(account)
return render_template("account.html",
account=acc,
repositories=acc.repositories)
def repository(account, repository, limit=50):
"""Display the recents commits and branches of a repository.
:param account: name of the owner
:param repository: name of the repository
:param limit: size of the commit window
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
commits = CommitStatus.query \
.filter_by(repository_id=repo.id) \
.order_by(db.desc(CommitStatus.id)) \
.limit(limit)
return render_template("repository.html",
account=acc,
repository=repo,
commits=commits)
def commit(account, repository, sha):
"""Display the status of a commit.
:param account: name of the owner
:param repository: name of the repository
:param sha: identifier of the commit
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
commit = CommitStatus.query.filter_by(repository_id=repo.id,
sha=sha).first_or_404()
return render_template("commit.html",
account=acc,
repository=repo,
commit=commit)
def branch(account, repository, branch):
"""Display the statuses of a branch.
:param account: name of the owner
:param repository: name of the repository
:param branch: name of the branch
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
all = BranchStatus.query.join(BranchStatus.commit) \
.filter(CommitStatus.repository_id == repo.id) \
.filter(BranchStatus.name == branch) \
.all()
if not all:
raise NotFound("{0.fullname} as no branches called {1}"
.format(repo, branch))
return render_template("branches.html",
account=acc,
repository=repo,
branches=all)
def branch_status(account, repository, branch, sha):
"""Display the status of a pull request.
:param account: name of the owner
:param repository: name of the repository
:param branch: name of the branch
:param sha: commit identifier of the commit related with the branch
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
branch = BranchStatus.query.join(BranchStatus.commit) \
.filter(CommitStatus.repository_id == repo.id) \
.filter(CommitStatus.sha == sha) \
.filter(BranchStatus.name == branch) \
.first_or_404()
return render_template("branch.html",
account=acc,
repository=repo,
branch=branch,
commit=branch.commit)
def payload():
"""Handle the GitHub events.
.. seealso::
`Event Types <https://developer.github.com/v3/activity/events/types/>`
"""
q = current_app.config["queue"]
events = ["push", "pull_request"]
try:
event = None
if "X-GitHub-Event" in request.headers:
event = request.headers["X-GitHub-Event"]
else:
raise ValueError("No X-GitHub-Event HTTP header found")
if event == "ping":
payload = {"message": "pong"}
elif event in events:
config = dict(current_app.config)
config.pop("queue")
timeout = config.pop("WORKER_TIMEOUT", None)
auto_create = config.pop("AUTO_CREATE", False)
data = json.loads(request.data)
repository_name = data["repository"]["name"]
keyname = "name" if event == "push" else "login"
owner_name = data["repository"]["owner"][keyname]
payload = {
"state": "pending",
"context": config.get("CONTEXT")
}
owner = Account.query.filter_by(name=owner_name).first()
if owner:
repository = Repository.query.filter_by(
name=repository_name,
owner_id=owner.id).first()
if not owner or not repository:
if auto_create:
owner = Account.find_or_create(owner_name)
repository = Repository.find_or_create(owner,
repository_name)
else:
payload["state"] = "error"
payload["description"] = "{0}/{1} is not yet registered" \
.format(owner_name,
repository_name)
if owner and repository:
if event == "push":
status_url = ""
commit_url = "https://api.github.com/repos/{owner}" \
"/{repo}/commits/{sha}"
for commit in data["commits"]:
cs = CommitStatus.find_or_create(repository,
commit["id"],
commit["url"])
status_url = url_for("commit",
account=owner.name,
repository=repository.name,
sha=cs.sha,
_external=True)
url = commit_url.format(
commit_url,
owner=owner.name,
repo=repository.name,
sha=cs.sha)
q.enqueue(push, cs.id, url, status_url, config,
timeout=timeout)
payload["target_url"] = status_url
payload["description"] = "commits queues"
elif event == "pull_request":
if data["action"] not in ["synchronize", "opened",
"reopened"]:
raise ValueError(
"Pull request action {0} is not supported"
.format(data["action"]))
repo = data["repository"]
data = data["pull_request"]
pull_request_url = data["url"]
commit_sha = data["head"]["sha"]
commits = []
headers = get_headers(Repository.query.filter_by(
name=repo["name"]).first(), config)
response = requests.get(data["commits_url"],
headers=headers)
response.raise_for_status() # check API rate limit
response_json = json.loads(response.content)
for commit in response_json:
cstat = CommitStatus.find_or_create(repository,
commit["sha"],
commit["html_url"])
commits.append(cstat)
bs = BranchStatus.find_or_create(commits[-1],
data["head"]["label"],
data["html_url"],
{"commits": commits})
status_url = url_for("branch_status",
account=owner.name,
repository=repository.name,
branch=bs.name,
sha=commit_sha,
_external=True)
q.enqueue(pull_request, bs.id, pull_request_url,
status_url, config, timeout=timeout)
payload["target_url"] = status_url
payload["description"] = "pull request {0} queued" \
.format(bs.name)
else:
raise ValueError("Event {0} is not supported".format(event))
return jsonify(payload=payload)
except Exception as e:
import traceback
# Uncomment to help you debugging the tests
# raise e
return make_response(jsonify(status="failure",
stacktrace=traceback.format_exc(),
exception=str(e)),
500)
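# For reference, an illustrative sketch of the only "push" payload fields the handler
# above reads (field names follow the GitHub webhook format; the values are invented):
#   {"repository": {"name": "demo-repo", "owner": {"name": "demo-owner"}},
#    "commits": [{"id": "<sha>", "url": "https://github.com/demo-owner/demo-repo/commit/<sha>"}]}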
def _get_account(account_name):
"""Get the account by name.
:param account_name: name of the account
:raise NotFound: if the account cannot be found
"""
account = Account.query.filter_by(name=account_name).first()
if not account:
raise NotFound("{0} isn't registered yet.".format(account_name))
return account
def _get_repository(account, repository_name):
"""Get the repository by name.
:param account: account
:param repository_name: name of the repository
:raise NotFound: if the repository cannot be found
"""
repository = Repository.query.filter_by(owner_id=account.id,
name=repository_name).first()
if not repository:
raise NotFound("{0}/{1} isn't registered yet.".format(account.name,
repository_name))
return repository
| gpl-2.0 | -8,063,789,881,589,458,000 | 37.980892 | 79 | 0.506454 | false |
andrewleech/SickRage | sickbeard/providers/hdtorrents_it.py | 3 | 8277 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from requests.utils import dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from six.moves.urllib.parse import quote_plus
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class HDTorrentsProvider_IT(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "HDTorrents.it")
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.freeleech = None
self.urls = {'base_url': 'http://hdtorrents.it',
'login': 'http://hdtorrents.it/takelogin.php',
'search': 'http://hdtorrents.it/browse.php?search=%s',
'rss': 'http://hdtorrents.it/browse.php?search=%s',
'home': 'http://hdtorrents.it/%s'}
self.url = self.urls['base_url']
self.proper_strings = ['PROPER', 'REPACK']
        self.cache = tvcache.TVCache(self, min_time=30)  # only poll HDTorrents every 30 minutes max
def _check_auth(self):
if not self.username or not self.password:
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return True
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'username': self.username,
'password': self.password,
'submit': 'Accedi!'}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Lei non e registrato in sistema.', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log("Search Mode: {}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
search_url = self.urls['search'] % quote_plus(search_string)
logger.log("Search string: {}".format(search_string), logger.DEBUG)
else:
search_url = self.urls['rss']
if self.freeleech:
search_url = search_url.replace('active=1', 'active=5')
logger.log("Search URL: {}".format(search_url), logger.DEBUG)
data = self.get_url(search_url)
if not data or 'Error' in data:
logger.log("No data returned from provider", logger.DEBUG)
continue
if data.find('Non abbiamo trovato nulla') != -1:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
# Search result page contains some invalid html that prevents html parser from returning all data.
# We cut everything before the table that contains the data we are interested in thus eliminating
# the invalid html portions
try:
index = data.lower().index('<tbody id="highlighted"')
except ValueError:
logger.log("Could not find table of torrents highlighted", logger.DEBUG)
continue
# data = urllib.unquote(data[index:].encode('utf-8')).decode('utf-8').replace('\t', '')
data = data[index:]
with BS4Parser(data, 'html5lib') as html:
if not html:
logger.log("No html data parsed from provider", logger.DEBUG)
continue
torrent_rows = []
torrent_table = html.find('table', class_='highlighted')
if torrent_table:
torrent_rows = torrent_table.find_all('tr')
if not torrent_rows:
logger.log("Could not find results in returned data", logger.DEBUG)
continue
# Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result.findChildren('td')[:len(labels)]
if len(cells) < len(labels):
continue
title = cells[labels.index(1)].a.index(0).get_text(strip=True)
seeders = try_int(cells[labels.index(5)].a.index(0).get_text(strip=True))
leechers = try_int(cells[labels.index(5)].a.index(1).get_text(strip=True))
torrent_size = cells[labels.index(4)].get_text()
size = convert_size(torrent_size) or -1
download_url = self.url + '/' + cells[labels.index(1)].a.index(0)['href']
# title = cells[labels.index(u'Filename')].a.get_text(strip=True)
# seeders = try_int(cells[labels.index(u'S')].get_text(strip=True))
# leechers = try_int(cells[labels.index(u'L')].get_text(strip=True))
# torrent_size = cells[labels.index(u'Size')].get_text()
# size = convert_size(torrent_size) or -1
# download_url = self.url + '/' + cells[labels.index(u'Dl')].a['href']
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
provider = HDTorrentsProvider_IT()
| gpl-3.0 | 4,297,038,823,792,430,000 | 41.88601 | 179 | 0.550441 | false |
kxliugang/edx-platform | lms/djangoapps/lti_provider/tests/test_tasks.py | 36 | 4381 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import ddt
from django.test import TestCase
from mock import patch, MagicMock
from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.tasks as tasks
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super(BaseOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id, # pylint: disable=no-member
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
def setUp(self):
super(SendCompositeOutcomeTest, self).setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.get_weighted_scores', self.weighted_scores
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score_score(self, earned, possible, expected):
self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.assertEqual(self.weighted_scores_mock.call_count, 0)
| agpl-3.0 | 5,071,849,225,120,550,000 | 32.189394 | 102 | 0.611048 | false |
openhatch/oh-mainline | vendor/packages/gdata/src/atom/http_core.py | 40 | 19862 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = '[email protected] (Jeff Scudder)'
import os
import StringIO
import urlparse
import urllib
import httplib
ssl = None
try:
import ssl
except ImportError:
pass
class Error(Exception):
pass
class UnknownSize(Error):
pass
class ProxyError(Error):
pass
MIME_BOUNDARY = 'END_OF_PART'
def get_headers(http_response):
"""Retrieves all HTTP headers from an HTTP response from the server.
This method is provided for backwards compatibility for Python2.2 and 2.3.
The httplib.HTTPResponse object in 2.2 and 2.3 does not have a getheaders
method so this function will use getheaders if available, but if not it
will retrieve a few using getheader.
"""
if hasattr(http_response, 'getheaders'):
return http_response.getheaders()
else:
headers = []
for header in (
'location', 'content-type', 'content-length', 'age', 'allow',
'cache-control', 'content-location', 'content-encoding', 'date',
'etag', 'expires', 'last-modified', 'pragma', 'server',
'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning',
'www-authenticate', 'gdata-version'):
value = http_response.getheader(header, None)
if value is not None:
headers.append((header, value))
return headers
class HttpRequest(object):
"""Contains all of the parameters for an HTTP 1.1 request.
The HTTP headers are represented by a dictionary, and it is the
responsibility of the user to ensure that duplicate field names are combined
into one header value according to the rules in section 4.2 of RFC 2616.
"""
method = None
uri = None
def __init__(self, uri=None, method=None, headers=None):
"""Construct an HTTP request.
Args:
uri: The full path or partial path as a Uri object or a string.
method: The HTTP method for the request, examples include 'GET', 'POST',
etc.
headers: dict of strings The HTTP headers to include in the request.
"""
self.headers = headers or {}
self._body_parts = []
if method is not None:
self.method = method
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
self.uri = uri or Uri()
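  # Illustrative usage (the URL and header values are hypothetical). Note that
  # duplicate field names must be combined into a single value, as for Accept here:
  #   request = HttpRequest(uri='http://www.example.com/feed', method='GET')
  #   request.headers['Accept'] = 'application/atom+xml, text/html'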
def add_body_part(self, data, mime_type, size=None):
"""Adds data to the HTTP request body.
If more than one part is added, this is assumed to be a mime-multipart
request. This method is designed to create MIME 1.0 requests as specified
in RFC 1341.
Args:
data: str or a file-like object containing a part of the request body.
mime_type: str The MIME type describing the data
size: int Required if the data is a file like object. If the data is a
string, the size is calculated so this parameter is ignored.
"""
if isinstance(data, str):
size = len(data)
if size is None:
# TODO: support chunked transfer if some of the body is of unknown size.
raise UnknownSize('Each part of the body must have a known size.')
if 'Content-Length' in self.headers:
content_length = int(self.headers['Content-Length'])
else:
content_length = 0
# If this is the first part added to the body, then this is not a multipart
# request.
if len(self._body_parts) == 0:
self.headers['Content-Type'] = mime_type
content_length = size
self._body_parts.append(data)
elif len(self._body_parts) == 1:
# This is the first member in a mime-multipart request, so change the
# _body_parts list to indicate a multipart payload.
self._body_parts.insert(0, 'Media multipart posting')
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
content_length += len(boundary_string) + size
self._body_parts.insert(1, boundary_string)
content_length += len('Media multipart posting')
# Put the content type of the first part of the body into the multipart
# payload.
original_type_string = 'Content-Type: %s\r\n\r\n' % (
self.headers['Content-Type'],)
self._body_parts.insert(2, original_type_string)
content_length += len(original_type_string)
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.append(boundary_string)
content_length += len(boundary_string)
# Change the headers to indicate this is now a mime multipart request.
self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
MIME_BOUNDARY,)
self.headers['MIME-version'] = '1.0'
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.append(type_string)
content_length += len(type_string)
self._body_parts.append(data)
ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
self._body_parts.append(ending_boundary_string)
content_length += len(ending_boundary_string)
else:
# This is a mime multipart request.
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.insert(-1, boundary_string)
content_length += len(boundary_string) + size
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.insert(-1, type_string)
content_length += len(type_string)
self._body_parts.insert(-1, data)
self.headers['Content-Length'] = str(content_length)
# I could add an "append_to_body_part" method as well.
AddBodyPart = add_body_part
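  # Illustrative sketch of the multipart behaviour described above (the file name,
  # size and MIME types are hypothetical):
  #   request = HttpRequest(uri='http://www.example.com/upload', method='POST')
  #   request.add_body_part('<entry/>', 'application/atom+xml')
  #   request.add_body_part(open('photo.jpg', 'rb'), 'image/jpeg', size=1024)
  #   # headers['Content-Type'] is now 'multipart/related; boundary="END_OF_PART"'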
def add_form_inputs(self, form_data,
mime_type='application/x-www-form-urlencoded'):
"""Form-encodes and adds data to the request body.
Args:
      form_data: dict or sequence of two member tuples which contains the
form keys and values.
mime_type: str The MIME type of the form data being sent. Defaults
to 'application/x-www-form-urlencoded'.
"""
body = urllib.urlencode(form_data)
self.add_body_part(body, mime_type)
AddFormInputs = add_form_inputs
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request
def _dump(self):
"""Converts to a printable string for debugging purposes.
In order to preserve the request, it does not read from file-like objects
in the body.
"""
output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
self.method, str(self.uri))
for header, value in self.headers.iteritems():
output += ' %s: %s\n' % (header, value)
output += ' body sections:\n'
i = 0
for part in self._body_parts:
if isinstance(part, (str, unicode)):
output += ' %s: %s\n' % (i, part)
else:
output += ' %s: <file like object>\n' % i
i += 1
return output
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
"""A URI as used in HTTP 1.1"""
scheme = None
host = None
port = None
path = None
def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
"""Constructor for a URI.
Args:
scheme: str This is usually 'http' or 'https'.
host: str The host name or IP address of the desired server.
      port: int The server's port number.
path: str The path of the resource following the host. This begins with
a /, example: '/calendar/feeds/default/allcalendars/full'
query: dict of strings The URL query parameters. The keys and values are
both escaped so this dict should contain the unescaped values.
For example {'my key': 'val', 'second': '!!!'} will become
'?my+key=val&second=%21%21%21' which is appended to the path.
"""
self.query = query or {}
if scheme is not None:
self.scheme = scheme
if host is not None:
self.host = host
if port is not None:
self.port = port
if path:
self.path = path
def _get_query_string(self):
param_pairs = []
for key, value in self.query.iteritems():
param_pairs.append('='.join((urllib.quote_plus(key),
urllib.quote_plus(str(value)))))
return '&'.join(param_pairs)
def _get_relative_path(self):
"""Returns the path with the query parameters escaped and appended."""
param_string = self._get_query_string()
if self.path is None:
path = '/'
else:
path = self.path
if param_string:
return '?'.join([path, param_string])
else:
return path
def _to_string(self):
if self.scheme is None and self.port == 443:
scheme = 'https'
elif self.scheme is None:
scheme = 'http'
else:
scheme = self.scheme
if self.path is None:
path = '/'
else:
path = self.path
if self.port is None:
return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
else:
return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
self._get_relative_path())
def __str__(self):
return self._to_string()
def modify_request(self, http_request=None):
"""Sets HTTP request components based on the URI."""
if http_request is None:
http_request = HttpRequest()
if http_request.uri is None:
http_request.uri = Uri()
# Determine the correct scheme.
if self.scheme:
http_request.uri.scheme = self.scheme
if self.port:
http_request.uri.port = self.port
if self.host:
http_request.uri.host = self.host
# Set the relative uri path
if self.path:
http_request.uri.path = self.path
if self.query:
http_request.uri.query = self.query.copy()
return http_request
ModifyRequest = modify_request
def parse_uri(uri_string):
"""Creates a Uri object which corresponds to the URI string.
This method can accept partial URIs, but it will leave missing
members of the Uri unset.
"""
parts = urlparse.urlparse(uri_string)
uri = Uri()
if parts[0]:
uri.scheme = parts[0]
if parts[1]:
host_parts = parts[1].split(':')
if host_parts[0]:
uri.host = host_parts[0]
if len(host_parts) > 1:
uri.port = int(host_parts[1])
if parts[2]:
uri.path = parts[2]
if parts[4]:
param_pairs = parts[4].split('&')
for pair in param_pairs:
pair_parts = pair.split('=')
if len(pair_parts) > 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = (
urllib.unquote_plus(pair_parts[1]))
elif len(pair_parts) == 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = None
return uri
parse_uri = staticmethod(parse_uri)
ParseUri = parse_uri
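  # Illustrative sketch of the partial-URI behaviour documented above (the path and
  # query values are hypothetical):
  #   uri = Uri.parse_uri('/feeds/posts?alt=json')
  #   # uri.scheme and uri.host stay None; uri.path == '/feeds/posts'
  #   # and uri.query == {'alt': 'json'}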
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
class HttpResponse(object):
status = None
reason = None
_body = None
def __init__(self, status=None, reason=None, headers=None, body=None):
self._headers = headers or {}
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if body is not None:
if hasattr(body, 'read'):
self._body = body
else:
self._body = StringIO.StringIO(body)
def getheader(self, name, default=None):
if name in self._headers:
return self._headers[name]
else:
return default
def getheaders(self):
return self._headers
def read(self, amt=None):
if self._body is None:
return None
if not amt:
return self._body.read()
else:
return self._body.read(amt)
def _dump_response(http_response):
"""Converts to a string for printing debug messages.
Does not read the body since that may consume the content.
"""
output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % (
http_response.status, http_response.reason)
headers = get_headers(http_response)
if isinstance(headers, dict):
for header, value in headers.iteritems():
output += ' %s: %s\n' % (header, value)
else:
for pair in headers:
output += ' %s: %s\n' % (pair[0], pair[1])
return output
class HttpClient(object):
"""Performs HTTP requests using httplib."""
debug = None
def request(self, http_request):
return self._http_request(http_request.method, http_request.uri,
http_request.headers, http_request._body_parts)
Request = request
def _get_connection(self, uri, headers=None):
"""Opens a socket connection to the server to set up an HTTP request.
Args:
uri: The full URL for the request as a Uri object.
headers: A dict of string pairs containing the HTTP headers for the
request.
"""
connection = None
if uri.scheme == 'https':
if not uri.port:
connection = httplib.HTTPSConnection(uri.host)
else:
connection = httplib.HTTPSConnection(uri.host, int(uri.port))
else:
if not uri.port:
connection = httplib.HTTPConnection(uri.host)
else:
connection = httplib.HTTPConnection(uri.host, int(uri.port))
return connection
def _http_request(self, method, uri, headers=None, body_parts=None):
"""Makes an HTTP request using httplib.
Args:
method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
uri: str or atom.http_core.Uri
headers: dict of strings mapping to strings which will be sent as HTTP
headers in the request.
body_parts: list of strings, objects with a read method, or objects
which can be converted to strings using str. Each of these
will be sent in order as the body of the HTTP request.
"""
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
connection = self._get_connection(uri, headers=headers)
if self.debug:
connection.debuglevel = 1
if connection.host != uri.host:
connection.putrequest(method, str(uri))
else:
connection.putrequest(method, uri._get_relative_path())
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
    # 'Host: www.google.com', and thus resulting in the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % uri.host
replacement_header_line = 'Host: %s' % uri.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name, value in headers.iteritems():
connection.putheader(header_name, value)
connection.endheaders()
# If there is data, send it in the request.
if body_parts and filter(lambda x: x != '', body_parts):
for part in body_parts:
_send_data_part(part, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _send_data_part(data, connection):
if isinstance(data, (str, unicode)):
# I might want to just allow str, not unicode.
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
class ProxiedHttpClient(HttpClient):
def _get_connection(self, uri, headers=None):
# Check to see if there are proxy settings required for this request.
proxy = None
if uri.scheme == 'https':
proxy = os.environ.get('https_proxy')
elif uri.scheme == 'http':
proxy = os.environ.get('http_proxy')
if not proxy:
return HttpClient._get_connection(self, uri, headers=headers)
# Now we have the URL of the appropriate proxy server.
# Get a username and password for the proxy if required.
proxy_auth = _get_proxy_auth()
if uri.scheme == 'https':
import socket
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = uri.port
if not port:
port = 443
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
# Set the user agent to send to the proxy
user_agent = ''
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl is not None:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
        sock_ssl = socket.ssl(p_sock, None, None)
sslobj = httplib.FakeSocket(p_sock, sock_ssl)
# Initalize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_uri.host)
connection.sock = sslobj
return connection
elif uri.scheme == 'http':
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
| agpl-3.0 | 5,632,854,148,327,313,000 | 32.269682 | 79 | 0.632917 | false |
duyet-website/api.duyet.net | lib/faker/providers/address/uk_UA/__init__.py | 3 | 5601 | # coding=utf-8
from __future__ import unicode_literals
from random import randint
from .. import Provider as AddressProvider
class Provider(AddressProvider):
address_formats = ['{{street_address}}, {{city}}, {{postcode}}']
building_number_formats = ['#', '##', '###']
city_formats = ['{{city_prefix}} {{first_name}}']
street_address_formats = ['{{street_name}}, {{building_number}}']
street_name_formats = ['{{street_prefix}} {{last_name}}',
'{{last_name}} {{street_suffix}}']
city_prefixes = ['місто', 'село', 'селище', 'хутір']
countries = [
'Австралія', 'Австрія', 'Азербайджан', 'Албанія', 'Алжир', 'Ангола',
'Андорра', 'Антигуа і Барбуда', 'Аргентина', 'Афганістан',
'Багамські Острови', 'Бангладеш', 'Барбадос', 'Бахрейн', 'Беліз',
'Бельгія', 'Бенін', 'Білорусь', 'Болгарія', 'Болівія',
'Боснія і Герцеговина', 'Ботсвана', 'Бразилія', 'Бруней',
'Буркіна-Фасо', 'Бурунді', 'Бутан', 'Вануату', 'Ватикан',
'Велика Британія', 'Венесуела', 'В\'єтнам', 'Вірменія', 'Габон',
'Гаїті', 'Гаяна', 'Гамбія', 'Гана', 'Гватемала', 'Гвінея',
'Гвінея-Бісау', 'Гондурас', 'Гренада', 'Греція', 'Грузія', 'Данія',
'Джибуті', 'Домініка', 'Домініканська Республіка', 'Еквадор',
'Екваторіальна Гвінея', 'Еритрея', 'Естонія', 'Ефіопія', 'Єгипет',
'Ємен', 'Замбія', 'Західна Сахара', 'Зімбабве', 'Ізраїль', 'Індія',
'Індонезія', 'Ірак', 'Іран', 'Ірландія', 'Ісландія', 'Іспанія',
'Італія', 'Йорданія', 'Кабо-Верде', 'Казахстан', 'Камбоджа', 'Камерун',
'Канада', 'Катар', 'Кенія', 'Киргизстан', 'КНР', 'Кіпр', 'Кірибаті',
'Колумбія', 'Коморські Острови', 'Конго', 'ДР Конго', 'Південна Корея',
'Північна Корея', 'Косово', 'Коста-Рика', 'Кот-д\'Івуар', 'Куба',
'Кувейт', 'Лаос', 'Латвія', 'Лесото', 'Литва', 'Ліберія', 'Ліван',
'Лівія', 'Ліхтенштейн', 'Люксембург', 'Маврикій', 'Мавританія',
'Мадагаскар', 'Республіка Македонія', 'Малаві', 'Малайзія', 'Малі',
'Мальдіви', 'Мальта', 'Марокко', 'Маршаллові Острови', 'Мексика',
'Федеративні Штати Мікронезії', 'Мозамбік', 'Молдова', 'Монако',
'Монголія', 'М\'янма', 'Намібія', 'Науру', 'Непал', 'Нігер', 'Нігерія',
'Нідерланди', 'Нікарагуа', 'Німеччина', 'Нова Зеландія', 'Норвегія',
'ОАЕ', 'Оман', 'Пакистан', 'Палау', 'Палестинська держава', 'Панама',
'Папуа Нова Гвінея', 'ПАР', 'Парагвай', 'Перу', 'Південний Судан',
'Польща', 'Португалія', 'Росія', 'Руанда', 'Румунія', 'Сальвадор',
'Самоа', 'Сан-Марино', 'Сан-Томе і Принсіпі', 'Саудівська Аравія',
'Свазіленд', 'Сейшельські Острови', 'Сенегал',
'Сент-Вінсент і Гренадини', 'Сент-Кіттс і Невіс', 'Сент-Люсія',
'Сербія', 'Сінгапур', 'Сирія', 'Словаччина', 'Словенія',
'Соломонові Острови', 'Сомалі', 'Судан', 'Суринам', 'Східний Тимор',
'США', 'Сьєрра-Леоне', 'Таджикистан', 'Таїланд', 'Тайвань', 'Танзанія',
'Того', 'Тонга', 'Тринідад і Тобаго', 'Тувалу', 'Туніс', 'Туреччина',
'Туркменістан', 'Уганда', 'Угорщина', 'Узбекистан', 'Україна',
'Уругвай', 'Фіджі', 'Філіппіни', 'Фінляндія', 'Франція', 'Хорватія',
'Центральноафриканська Республіка', 'Чад', 'Чехія', 'Чилі',
'Чорногорія', 'Швейцарія', 'Швеція', 'Шрі-Ланка', 'Ямайка', 'Японія'
]
street_prefixes = [
'вулиця', 'проспект', 'майдан', 'набережна', 'бульвар', 'провулок'
]
street_suffixes = ['узвіз']
@classmethod
def city_prefix(cls):
return cls.random_element(cls.city_prefixes)
@classmethod
def postcode(cls):
"""The code consists of five digits (01000-99999)"""
        return '{}{}'.format(randint(0, 9), randint(1000, 9999))
@classmethod
def street_prefix(cls):
return cls.random_element(cls.street_prefixes)
| mit | -5,230,261,958,806,217,000 | 51.662162 | 79 | 0.590967 | false |
JMY1000/CyclesMineways | CyclesMineways.py | 1 | 46368 | # Cycles Mineways setup
# Version 1.3.0, 5/28/16
# Copyright © 2016
# Please send suggestions or report bugs at https://github.com/JMY1000/CyclesMineways/
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation under version 3 of the License.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details at http://www.gnu.org/licenses/gpl-3.0.en.html
# Distributed with Mineways, http://mineways.com
# To use the script within Blender, for use with the Cycles renderer:
# Open Blender and import the obj file created by Mineways.
# Change any window to the text editor.
# Alternatively, Go to the top of the window where it says "Default",
# click on the screen layout button left to the word "Default" and pick "Scripting".
# Click "Open" at the bottom of the text window.
# Go to the directory where this file, "CyclesMineways.py", is and select it.
# You should now see some text in the text window.
# Alternatively, you can click "new" then paste in the text.
# To apply this script, click on the "Run Script" button at the bottom of the text window.
# OPTIONAL: To see the script's print output, you may want to turn on the terminal/console.
# It is not critical to see this window, but it might give you a warm and fuzzy feeling to know that the script has worked.
# It also helps provide debug info if something goes wrong.
# For Windows:
# From the upper left of your window select "Window" and then "Toggle System Console".
# For OS X:
# Find your application, right click it, hit "Show Package Contents".
# Navigate to Contents/MacOS/blender Launch blender this way, this will show the terminal.
# For Linux:
# Run Blender through the terminal.
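# OPTIONAL: Blender can also run this script from the command line in background mode.
# A minimal sketch (the file name is only an example; it assumes the obj was already
# imported and saved as a .blend next to this script):
#   blender --background myworld.blend --python CyclesMineways.py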
#importing the Blender Python library
import bpy
print("Libraries imported")
# CONSTANTS
# PREFIX can stay as "" if you are importing into a project that is not massive and has no other imported Mineways worlds.
# If the .blend does not meet these requirements, you must set PREFIX to allow this script to know what it is working with.
# Set the PREFIX to the name of the file it uses (eg: a castle.obj file uses PREFIX = "castle")
PREFIX = ""
# USER_INPUT_SCENE controls what scenes Blender will apply this script's functionality to.
# If this list has scenes, the script will only use those scenes to work with;
# otherwise, it will use all scenes
# example: USER_INPUT_SCENE = ["scene","scene2","randomScene123"]
USER_INPUT_SCENE = []
# WATER_SHADER_TYPE controls the water shader that will be used.
# Use 0 for a solid block shader.
# Use 1 for a semi-transparent flat shader.
# Use 2 for a small, sharp waves shader.
# Use 3 for a wavy shader.
# For a more detailed explanation with pictures of each water shader type, visit: https://github.com/JMY1000/CyclesMineways/wiki/Water-Shader-Types
WATER_SHADER_TYPE = 1
# TIME_OF_DAY controls the time of day.
# If TIME_OF_DAY is between 6.5 and 19.5 (crossing 12), the daytime shader will be used.
# If TIME_OF_DAY is between 19.5 and 6.5 (crossing 24), the nighttime shader will be used.
# NOTE: The decimal part is a fraction of an hour, not minutes (ex. 12:30 is 12.50, 18:45 is 18.75).
# NOTE: This currently only handles day and night
TIME_OF_DAY = 12.00
# DISPLACE_WOOD controls whether virtual displacement (changes normals for illusion of roughness) for wooden plank blocks is used.
# NOTE: This currently only works for oak wood planks.
# NOTE: This can only be True or False
DISPLACE_WOOD = False
# STAINED_GLASS_COLOR controls how strongly the light that passes through stained glass is coloured.
# 0 means light passes through unchanged
# 1 means all the light is changed to the glass's color (not recommended)
STAINED_GLASS_COLOR = 0.4
#List of transparent blocks
transparentBlocks = ["Acacia_Leaves","Dark_Oak_Leaves","Acacia_Door","Activator_Rail","Bed","Beetroot_Seeds","Birch_Door","Brewing_Stand","Cactus","Carrot","Carrots","Cauldron","Chorus_Flower","Chorus_Flower_Dead","Chorus_Plant","Cobweb",
"Cocoa","Crops","Dandelion","Dark_Oak_Door","Dead_Bush","Detector_Rail","Enchantment_Table","Glass","Glass_Pane","Grass","Iron_Bars","Iron_Door","Iron_Trapdoor","Jack_o'Lantern","Jungle_Door","Large_Flowers",
"Leaves","Melon_Stem","Monster_Spawner","Nether_Portal","Nether_Wart","Oak_Leaves","Oak_Sapling","Poppy","Potato","Potatoes","Powered_Rail","Pumpkin_Stem","Rail","Red_Mushroom",
"Redstone_Comparator_(inactive)","Redstone_Torch_(inactive)","Repeater_(inactive)","Sapling","Spruce_Door","Stained_Glass_Pane","Sugar_Cane","Sunflower","Tall_Grass","Trapdoor","Vines","Wheat","Wooden_Door"]
#List of light emitting blocks
lightBlocks = ["Daylight_Sensor","End_Gateway","End_Portal","Ender_Chest","Flowing_Lava","Glowstone","Inverted_Daylight_Sensor","Lava","Magma_Block","Redstone_Lamp_(active)","Stationary_Lava","Sea_Lantern"]
#List of light emitting and transparent blocks
lightTransparentBlocks = ["Beacon","Brown_Mushroom","Dragon_Egg","Endframe","End_Rod","Fire","Powered_Rail_(active)","Redstone_Comparator_(active)","Redstone_Torch_(active)","Repeater_(active)","Torch"]
#SHADERS
def Setup_Node_Tree(material):
#Make the material use nodes
material.use_nodes=True
#Set the variable node_tree to be the material's node tree and variable nodes to be the node tree's nodes
node_tree=material.node_tree
nodes=material.node_tree.nodes
#Remove the old nodes
for eachNode in nodes:
nodes.remove(eachNode)
return nodes,node_tree
def Normal_Shader(material,rgba_image):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,300)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = rgba_image
rgba_node.interpolation=('Closest')
rgba_node.location=(-300,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(diffuse_node.outputs["BSDF"],output_node.inputs["Surface"])
def Transparent_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,400)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Light_Emiting_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the diffuse deciding mix node
diffuse_mix_node=nodes.new('ShaderNodeMixShader')
diffuse_mix_node.location=(300,300)
#Create the Light Path Node
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,600)
#Create the diffuse emission
indirect_emission_node=nodes.new('ShaderNodeEmission')
indirect_emission_node.location=(0,100)
#Create the Light Falloff node for indirect emission
light_falloff_node=nodes.new('ShaderNodeLightFalloff')
light_falloff_node.location=(-300,0)
light_falloff_node.inputs[0].default_value=200 #sets strength of light
light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
#Create the HSV node to brighten the light
hsv_node=nodes.new('ShaderNodeHueSaturation')
hsv_node.location=(-300,200)
hsv_node.inputs["Value"].default_value=3 # brightens the color for better lighting
#Create the direct emission node
direct_emission_node=nodes.new('ShaderNodeEmission')
direct_emission_node.location=(0,300)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],direct_emission_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
links.new(hsv_node.outputs["Color"],indirect_emission_node.inputs["Color"])
links.new(light_falloff_node.outputs[0],indirect_emission_node.inputs[1]) #connects quadratic output to emission strength
links.new(indirect_emission_node.outputs["Emission"],diffuse_mix_node.inputs[2])
links.new(direct_emission_node.outputs["Emission"],diffuse_mix_node.inputs[1])
links.new(light_path_node.outputs[2],diffuse_mix_node.inputs["Fac"]) #links "is diffuse ray" to factor of mix node
links.new(diffuse_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Transparent_Emiting_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the indirect-direct mix shader
indirect_mix_node=nodes.new('ShaderNodeMixShader')
indirect_mix_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the Light Path node to check if light is indirect
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,800)
#Create the Light Falloff node for indirect emission
light_falloff_node=nodes.new('ShaderNodeLightFalloff')
light_falloff_node.location=(-300,600)
light_falloff_node.inputs[0].default_value=80 #sets strength of light
light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
#Create the indirect emission node
indirect_emission_node=nodes.new('ShaderNodeEmission')
indirect_emission_node.location=(0,500)
indirect_emission_node.inputs["Color"].default_value = (1,1,0.56,1)
#Only tested color on torches, needs testing on other transparent emitters to see if it looks weird
#Create the direct emission node
emission_node=nodes.new('ShaderNodeEmission')
emission_node.location=(-300,400)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],emission_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(emission_node.outputs["Emission"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],indirect_mix_node.inputs[1])
links.new(light_falloff_node.outputs["Quadratic"],indirect_emission_node.inputs["Strength"])
links.new(indirect_emission_node.outputs["Emission"],indirect_mix_node.inputs[2])
links.new(light_path_node.outputs["Is Diffuse Ray"],indirect_mix_node.inputs["Fac"])
links.new(indirect_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Lily_Pad_Shader(material):
    #A water setup shader should have run before this
#Set the variable node_tree to be the material's node tree and variable nodes to be the node tree's nodes
node_tree=material.node_tree
nodes=material.node_tree.nodes
output = None
image_node = None
for node in nodes:
if node.name=="Material Output":
output=node
if node.name=="Image Texture": #assumes only 1 image input
image_node=node
output.location = (600,300)
water_output = output.inputs[0].links[0].from_node
mix_node = nodes.new('ShaderNodeMixShader')
mix_node.location=(300,500)
diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,500)
RGB_splitter_node = nodes.new('ShaderNodeSeparateRGB')
RGB_splitter_node.location=(-300,700)
less_than_node = nodes.new('ShaderNodeMath')
less_than_node.location=(0,700)
less_than_node.operation="LESS_THAN"
links=node_tree.links
links.new(mix_node.outputs[0],output.inputs[0])
links.new(diffuse_node.outputs[0],mix_node.inputs[1])
links.new(water_output.outputs[0],mix_node.inputs[2]) #making massive assumption that output of water is in first output
links.new(less_than_node.outputs[0],mix_node.inputs[0])
links.new(image_node.outputs[0],diffuse_node.inputs[0])
links.new(RGB_splitter_node.outputs[2],less_than_node.inputs[1])
links.new(RGB_splitter_node.outputs[1],less_than_node.inputs[0])
links.new(image_node.outputs[0],RGB_splitter_node.inputs[0])
def Stained_Glass_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,400)
#Create shadow(math)-color(HSV) mix node
shadow_color_mix_node=nodes.new('ShaderNodeMixRGB')
shadow_color_mix_node.location=(-600,400)
shadow_color_mix_node.inputs[1].default_value=(1,1,1,0)
#Create HSV node because for some reason color from the RGBA node in transparent nodes is super dark
hsv_node=nodes.new('ShaderNodeHueSaturation')
hsv_node.location=(-900,280)
hsv_node.inputs[1].default_value=2
hsv_node.inputs[2].default_value=8
#Create math(multiply, clamped) node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(-900,450)
multiply_node.operation=('MULTIPLY')
multiply_node.use_clamp=True
multiply_node.inputs[1].default_value=STAINED_GLASS_COLOR
#Create math(add, clamped) node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-1200,450)
add_node.operation=('ADD')
add_node.use_clamp=True
#Create the lightpath node
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(-1500,450)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-900,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-1200,100)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
links.new(light_path_node.outputs[1],add_node.inputs[0]) #connects Is Shadow Ray to add node
    links.new(light_path_node.outputs[2],add_node.inputs[1]) #connects Is Diffuse Ray to add node
links.new(add_node.outputs[0],multiply_node.inputs[0])
links.new(multiply_node.outputs["Value"],shadow_color_mix_node.inputs["Fac"])
links.new(hsv_node.outputs["Color"],shadow_color_mix_node.inputs[2])
links.new(shadow_color_mix_node.outputs["Color"],transparent_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_1(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the fresnel mix shader
fresnel_mix_node=nodes.new('ShaderNodeMixShader')
fresnel_mix_node.location=(0,300)
#Create Fresnel node ior=1.33
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(-300,400)
fresnel_node.inputs[0].default_value=1.33
#Create the transparency-diffuse mixer
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(-300,300)
mix_node.inputs[0].default_value=0.4
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-600,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,180)
#Create the glossy shader
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,100)
glossy_node.inputs[1].default_value=0.02
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-900,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],glossy_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[2])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[1])
links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs["Fac"])
links.new(mix_node.outputs["Shader"],fresnel_mix_node.inputs[1])
links.new(glossy_node.outputs["BSDF"],fresnel_mix_node.inputs[2])
links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_2(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the fresnel mix shader
fresnel_mix_node=nodes.new('ShaderNodeMixShader')
fresnel_mix_node.location=(300,300)
#Create Fresnel node
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(0,500)
fresnel_node.inputs[0].default_value=1.33
#Create the mix+transparent mix shader
mix_node_transparent_mix=nodes.new('ShaderNodeMixShader')
mix_node_transparent_mix.location=(0,300)
mix_node_transparent_mix.inputs[0].default_value=0.18
#Create the refraction-glossy mix shader
mix_node_ref_glossy=nodes.new('ShaderNodeMixShader')
mix_node_ref_glossy.location=(-300,0)
mix_node_ref_glossy.inputs[0].default_value=0.72
#Create Diffuse-transparent mix shader
diffuse_transparent_mix_shader=nodes.new('ShaderNodeMixShader')
diffuse_transparent_mix_shader.location=(-300,450)
diffuse_transparent_mix_shader.inputs["Fac"].default_value = 0.5
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,400)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-600,550)
#Create the glossy node
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,0)
glossy_node.inputs["Roughness"].default_value=0.005
#Create the refraction node
refraction_node=nodes.new('ShaderNodeBsdfRefraction')
refraction_node.location=(-600,300)
refraction_node.inputs[2].default_value=1.33
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-900,300)
rgba_node.label = "RGBA"
#Create the first multiply node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(0,-300)
multiply_node.operation=('MULTIPLY')
multiply_node.inputs[1].default_value=0.05
#Create the add node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-300,-300)
add_node.operation=('ADD')
#Create the first voronoi texture
voronoi_node=nodes.new('ShaderNodeTexVoronoi')
voronoi_node.location=(-600,-300)
voronoi_node.inputs[1].default_value=20
#Create the second multiply node
multiply_node_two=nodes.new('ShaderNodeMath')
multiply_node_two.location=(-600,-600)
multiply_node_two.operation=('MULTIPLY')
#Create the second voronoi texture
voronoi_node_two=nodes.new('ShaderNodeTexVoronoi')
voronoi_node_two.location=(-900,-600)
voronoi_node_two.inputs[1].default_value=30
#Create the texture coordinate node
texture_coordinate_node=nodes.new('ShaderNodeTexCoord')
texture_coordinate_node.location=(-1200,-300)
#Link the nodes
links=node_tree.links
links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs[0])
links.new(mix_node_transparent_mix.outputs["Shader"],fresnel_mix_node.inputs[1])
links.new(diffuse_transparent_mix_shader.outputs["Shader"],mix_node_transparent_mix.inputs[1])
links.new(diffuse_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[1])
links.new(transparent_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[2])
links.new(mix_node_ref_glossy.outputs["Shader"],mix_node_transparent_mix.inputs[2])
links.new(mix_node_ref_glossy.outputs["Shader"],fresnel_mix_node.inputs[2])
links.new(refraction_node.outputs["BSDF"],mix_node_ref_glossy.inputs[1])
links.new(glossy_node.outputs["BSDF"],mix_node_ref_glossy.inputs[2])
links.new(rgba_node.outputs["Color"],refraction_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
links.new(add_node.outputs["Value"],multiply_node.inputs[0])
links.new(voronoi_node.outputs["Fac"],add_node.inputs[0])
links.new(multiply_node_two.outputs["Value"],add_node.inputs[1])
links.new(voronoi_node_two.outputs["Fac"],multiply_node_two.inputs[0])
links.new(texture_coordinate_node.outputs["Object"],voronoi_node.inputs["Vector"])
links.new(texture_coordinate_node.outputs["Object"],voronoi_node_two.inputs["Vector"])
def Stationary_Water_Shader_3(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the first mix shader node
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(-300,300)
#Create the clamped add node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-600,600)
add_node.operation=('ADD')
add_node.use_clamp=True
#Create the fresnel node
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(-900,600)
fresnel_node.inputs["IOR"].default_value=1.33
#Create the transparent shader node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,400)
#Create the glossy shader node
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,300)
glossy_node.inputs["Roughness"].default_value=0.02
#Create the rgb mix shader
rgbmix_node=nodes.new('ShaderNodeMixRGB')
rgbmix_node.location=(-900,300)
rgbmix_node.inputs["Fac"].default_value=0.3
rgbmix_node.inputs["Color2"].default_value=(1,1,1,1)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-1200,300)
rgba_node.label = "RGBA"
#Create the wave texture node
wave_node=nodes.new('ShaderNodeTexWave')
wave_node.location=(-1200,0)
wave_node.inputs["Scale"].default_value=1.7
wave_node.inputs["Distortion"].default_value=34
wave_node.inputs["Detail"].default_value=5
wave_node.inputs["Detail Scale"].default_value=5
#Create the multiply node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(-600,0)
multiply_node.operation=('MULTIPLY')
#Link the nodes
links=node_tree.links
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(add_node.outputs["Value"],mix_node.inputs["Fac"])
links.new(fresnel_node.outputs["Fac"],add_node.inputs[0])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(glossy_node.outputs["BSDF"],mix_node.inputs[2])
links.new(rgbmix_node.outputs["Color"],glossy_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],rgbmix_node.inputs["Color1"])
links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
links.new(wave_node.outputs["Fac"],multiply_node.inputs[0])
def Flowing_Water_Shader(material):
material.use_nodes=True
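    # Note added for clarity: this is currently a placeholder; flowing water is
    # not given a custom node tree, so it keeps Blender's default node setup.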
def Slime_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Ice_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Sky_Day_Shader(world):
nodes, node_tree = Setup_Node_Tree(world)
#Add the output node
output_node=nodes.new('ShaderNodeOutputWorld')
output_node.location=(300,300)
#Add the background node
background_node=nodes.new('ShaderNodeBackground')
background_node.location=(0,300)
#Add the color correct node
HSV_node=nodes.new('ShaderNodeHueSaturation')
HSV_node.inputs["Value"].default_value=1.6 #Corrects the color value to be the same as Minecraft's sky
HSV_node.location=(-300,300)
#Add the sky texture node
sky_node=nodes.new('ShaderNodeTexSky')
sky_node.location=(-600,300)
#Link the nodes
links=node_tree.links
links.new(background_node.outputs["Background"],output_node.inputs["Surface"])
links.new(sky_node.outputs["Color"],HSV_node.inputs["Color"])
links.new(HSV_node.outputs["Color"],background_node.inputs["Color"])
def Sky_Night_Shader(world):
nodes, node_tree = Setup_Node_Tree(world)
#Add the output node
output_node=nodes.new('ShaderNodeOutputWorld')
output_node.location=(600,300)
#Add solid color background for diffuse textures
solid_background_node=nodes.new('ShaderNodeBackground')
solid_background_node.location=(0,150)
solid_background_node.inputs["Color"].default_value=(0.1,0.1,0.1,1)
#Add Light Path Node to make sure solid colour is only used for diffuse shaders
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,600)
#Add mix shader to add the diffuse-only background
diffuse_mixer_node=nodes.new('ShaderNodeMixShader')
diffuse_mixer_node.location=(300,300)
#Add the first background node
background_node=nodes.new('ShaderNodeBackground')
background_node.location=(0,300)
#Create the rgb mix shader
rgbmix_node=nodes.new('ShaderNodeMixRGB')
rgbmix_node.location=(-200,300)
rgbmix_node.inputs["Fac"].default_value=0.01
#Add the sky texture node
sky_node=nodes.new('ShaderNodeTexSky')
sky_node.location=(-600,0)
#Add the colorramp node
colorramp_node=nodes.new('ShaderNodeValToRGB')
colorramp_node.location=(-600,300)
colorramp_node.color_ramp.interpolation=('CONSTANT')
colorramp_node.color_ramp.elements[1].position=0.03
colorramp_node.color_ramp.elements[1].color=(0,0,0,255)
colorramp_node.color_ramp.elements[0].color=(255,255,255,255)
#Add the voronoi texture
voronoi_node=nodes.new('ShaderNodeTexVoronoi')
voronoi_node.location=(-900,300)
voronoi_node.coloring=("CELLS")
voronoi_node.inputs["Scale"].default_value=200
#Link the nodes
links=node_tree.links
links.new(diffuse_mixer_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(solid_background_node.outputs["Background"],diffuse_mixer_node.inputs[2])
links.new(light_path_node.outputs["Is Diffuse Ray"],diffuse_mixer_node.inputs[0]) # connects "Is Diffuse Ray" to factor
links.new(background_node.outputs["Background"],diffuse_mixer_node.inputs[1])
links.new(rgbmix_node.outputs["Color"],background_node.inputs["Color"])
links.new(colorramp_node.outputs["Color"],rgbmix_node.inputs["Color1"])
links.new(sky_node.outputs["Color"],rgbmix_node.inputs["Color2"])
links.new(voronoi_node.outputs["Color"],colorramp_node.inputs["Fac"])
def Wood_Displacement_Texture(material,rgba_image):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,300)
    diffuse_node.inputs[1].default_value=0.3 # sets the diffuse BSDF's roughness to 0.3
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = rgba_image
rgba_node.interpolation=('Closest')
rgba_node.location=(-300,300)
rgba_node.label = "RGBA"
#Create displacement node tree
#Create magic node 1
magic_node_one=nodes.new('ShaderNodeTexMagic')
magic_node_one.location=(-900,200)
magic_node_one.turbulence_depth=6 #sets depth to 6
magic_node_one.inputs[1].default_value=5 #sets scale to 5
magic_node_one.inputs[2].default_value=10 #sets distortion to 10
#Create magic node 2
magic_node_two=nodes.new('ShaderNodeTexMagic')
magic_node_two.location=(-900,0)
magic_node_two.turbulence_depth=5 #sets depth to 5
magic_node_two.inputs[1].default_value=3.3 #sets scale to 3.3
magic_node_two.inputs[2].default_value=2.7 #sets distortion to 2.7
#Create Add node
#Connects to magic node 1 and 2
math_add_node_one=nodes.new('ShaderNodeMath')
math_add_node_one.location=(-600,0)
math_add_node_one.operation="ADD"
#Create noise texture
noise_node=nodes.new('ShaderNodeTexNoise')
noise_node.location=(-900,-200)
noise_node.inputs[1].default_value=6.9 #sets scale to 6.9
noise_node.inputs[2].default_value=5 #set detail to 5
noise_node.inputs[3].default_value=8 #sets distortion to 8
#Create multiply
#Connects to noise and 5
math_multiply_node=nodes.new('ShaderNodeMath')
math_multiply_node.location=(-600,-200)
math_multiply_node.operation="MULTIPLY"
math_multiply_node.inputs[1].default_value=5 #sets multiply value to 5
#Create 2nd Add node
#Connects to Add node and multiply node
math_add_node_two=nodes.new('ShaderNodeMath')
math_add_node_two.operation="ADD"
math_add_node_two.location=(-300,0)
#Create Divide node
#Connect from 2nd add node and input [1] to 10
#Connects to materials output
math_divide_node=nodes.new('ShaderNodeMath')
math_divide_node.location=(0,150)
math_divide_node.operation="DIVIDE"
math_divide_node.inputs[1].default_value=10
#Link the nodes
links=node_tree.links
#link surface modifiers
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(diffuse_node.outputs["BSDF"],output_node.inputs["Surface"])
#link displacement modifiers
links.new(magic_node_one.outputs["Fac"],math_add_node_one.inputs[0])
links.new(magic_node_two.outputs["Fac"],math_add_node_one.inputs[1])
links.new(math_add_node_one.outputs[0],math_add_node_two.inputs[0])
links.new(noise_node.outputs["Fac"],math_multiply_node.inputs[0])
links.new(math_multiply_node.outputs[0],math_add_node_two.inputs[1])
links.new(math_add_node_two.outputs[0],math_divide_node.inputs[0])
links.new(math_divide_node.outputs[0],output_node.inputs["Displacement"])
#MAIN
def main():
print("Main started")
#packing all the files into one .blend
print("Packing files")
bpy.ops.file.pack_all()
print("Files packed")
#finding the PREFIX for mineways
global PREFIX
print("Gettting PREFIX ('"+PREFIX+"')")
if PREFIX == "":
print("No prefix found, finding best PREFIX")
        names={} # initialises a dictionary
for img in bpy.data.images: # loops through all images in .blend file
pos = max( # sets pos to be the max value of the 3 values
img.name.rfind("-RGBA.png"), # if "-RGBA.png" is in the file name, returns non -1, else returns -1
img.name.rfind("-RGB.png"), # if "-RGB.png" is in the file name, returns non -1, else returns -1
img.name.rfind("-Alpha.png")) # if "-Alpha.png" is in the file name, returns non -1, else returns -1
            # all this max statement really does is check whether the name contains any of those strings; if not, pos is -1
print("Checking:",img.name,"(Position: ",pos,"Prefix: ",img.name[:pos]+")")
            if pos!=-1: # if pos==-1, the name does not contain "-RGBA.png", "-RGB.png" or "-Alpha.png"
try:
names[img.name[:pos]]+=1 # if a key called the file name in the dictionary exists, increase its value by 1
except KeyError:
names[img.name[:pos]]=1 # this code is only reached if the value could not be increased by one
# this happens when the value does not exist (i.e. the key does not exist because this is the first loop)
print("names: ",names)
        PREFIX = max(names, key=names.get) # finds the key in the dictionary that has the highest value
# this is how the code determines what the PREFIX should be (majority vote)
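        # e.g. names = {"castle": 3, "house": 1} -> PREFIX = "castle"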
print("Got PREFIX ('"+PREFIX+"')")
#Setting the render engine to Cycles and filtering materials that will be processed
print("Setting the render engine to Cycles and filtering materials that will be processed")
materials = []
    #if the user doesn't provide any scenes, add all materials that exist to the "materials" list
if len(USER_INPUT_SCENE)==0:
for scene in bpy.data.scenes:
scene.render.engine = 'CYCLES'
for material in bpy.data.materials:
materials.append(material)
#else for each scene provided
else:
for scene in bpy.data.scenes:
print("Checking for:",scene.name)
if scene.name in USER_INPUT_SCENE:
print("Adding materials from scene:",scene.name)
scene.render.engine='CYCLES'
#check to see if it's related to Mineways by checking if it has an active material
for object in scene.objects:
                    if object.active_material!=None: # This is a bad way of checking if an object is Mineways'
# we probably need to check its assigned texture, or name to see if it is one of our objects
materials.append(object.active_material)
print("Render engine set to Cycles for selected scenes")
try:
texture_rgba_image = bpy.data.images[PREFIX+"-RGBA.png"]
except:
print("Cannot find image. PREFIX is invalid.")
return
print("Setting up textures")
#for every material
for material in materials:
if (material.active_texture and len(material.active_texture.name)>=2 and material.active_texture.name[0:2]=="Kd"):
material_suffix = material.name[material.name.rfind("."):len(material.name)] # gets the .001 .002 .003 ... of the material
try:
int(material_suffix[1:])
except:
material_suffix=""
#if the material is transparent use a special shader
if any(material==bpy.data.materials.get(transparentBlock+material_suffix) for transparentBlock in transparentBlocks):
print(material.name+" is transparent.")
Transparent_Shader(material)
            #if the material is a light emitting block use a special shader
elif any(material==bpy.data.materials.get(lightBlock+material_suffix) for lightBlock in lightBlocks):
print(material.name+" is light block.")
Light_Emiting_Shader(material)
            #if the material is a light emitting transparent block use a special shader
            elif any(material==bpy.data.materials.get(lightTransparentBlock+material_suffix) for lightTransparentBlock in lightTransparentBlocks):
print(material.name+" is transparent light block.")
Transparent_Emiting_Shader(material)
#if the material is stained glass, use a special shader
elif material==bpy.data.materials.get("Stained_Glass"+material_suffix):
print(material.name+" is stained glass.")
Stained_Glass_Shader(material)
#if the material is stationary water or a lily pad, use a special shader
elif material==bpy.data.materials.get("Stationary_Water"+material_suffix) or material==bpy.data.materials.get("Water"+material_suffix) or material==bpy.data.materials.get("Lily_Pad"+material_suffix):
print(material.name+" is water or a lily pad.")
print("Using shader type",WATER_SHADER_TYPE)
if WATER_SHADER_TYPE==0:
Normal_Shader(material,texture_rgba_image)
elif WATER_SHADER_TYPE==1:
Stationary_Water_Shader_1(material)
elif WATER_SHADER_TYPE==2:
Stationary_Water_Shader_2(material)
elif WATER_SHADER_TYPE==3:
Stationary_Water_Shader_3(material)
else:
print("ERROR! COULD NOT SET UP WATER")
Normal_Shader(material,texture_rgba_image)
if material==bpy.data.materials.get("Lily_Pad"+material_suffix):
Lily_Pad_Shader(material)
#if the material is flowing water, use a special shader
elif material==bpy.data.materials.get("Flowing_Water"+material_suffix):
print(material.name+" is flowing water.")
pass
#if the material is slime, use a special shader
elif material==bpy.data.materials.get("Slime"+material_suffix):
print(material.name+" is slime.")
Slime_Shader(material)
#if the material is ice, use a special shader
elif material==bpy.data.materials.get("Ice"+material_suffix):
print(material.name+" is ice.")
Ice_Shader(material)
#if the material is wood and DISPLACE_WOOD is True
elif (material==bpy.data.materials.get("Oak_Wood_Planks"+material_suffix))and(DISPLACE_WOOD):
print(material.name+" is displaced wooden planks.")
Wood_Displacement_Texture(material,texture_rgba_image)
#else use a normal shader
else:
print(material.name+" is normal.")
Normal_Shader(material,texture_rgba_image)
print("Finished setting up materials")
#Set up the sky
print("Started shading sky")
for world in bpy.data.worlds:
if 6.5<=TIME_OF_DAY<=19.5:
Sky_Day_Shader(world)
else:
Sky_Night_Shader(world)
print("Sky shaded")
#Remove unnecessary textures
print("Removing unnecessary textures")
    for img in bpy.data.images: # loops through all images in .blend file
try:
suffix = img.name.rfind(".") # finds the index of the last . in the image's name
int(img.name[suffix+1:]) # check to see if the characters after the . are numbers
# EG test.001 would work (and return 1, but we're not getting its return value)
            # and test would error out, as suffix = -1, therefore int("test") errors
            # if an image's name happens to end in a dot followed by digits for another reason (eg: "3.14"), it will be removed by mistake //needs fixing
print("Texture "+img.name+" removed for being a duplicate.")
            img.user_clear() # clears all of the image's users so it can be removed
bpy.data.images.remove(img) # removes image from .blend file
except:
if (img.name==PREFIX+"-Alpha.png") or (img.name==PREFIX+"-RGB.png"): # checks if img ends in "-Alpha.png" or "-RGB.png"
print("Texture "+img.name+" removed for being redundant")
                img.user_clear() # clears all of the image's users so it can be removed
bpy.data.images.remove(img) # removes image from .blend file
else:
print("Texture "+img.name+" was not removed.") # only non-Mineways files can get here, or PREFIX.RGBA.png
print("Finished removing unnecessary textures")
### THE FOLLOWING CODE IS USED IN SETTING UP THE GUI, THIS FEATURE IS IN DEVELOPMENT.
### the following code makes buttons in the scenes tab that allow hotswitching between water types
class OBJECT_PT_water_changer(bpy.types.Panel): # The object used for drawing the buttons
bl_label = "Water Types" # the name of the sub-sub-catagory used
bl_space_type = "PROPERTIES" # the name of the main catagory used
bl_region_type = "WINDOW" # dunno
bl_context = "scene" # the name of the sub-catagory used
def draw(self, context): # called by blender when it wants to update the screen
self.layout.operator("object.water_changer", text='Use Solid Water').type="0" # draws water button 0
self.layout.operator("object.water_changer", text='Use Transparent Water').type="1" # draws water button 1
self.layout.operator("object.water_changer", text='Use Choppy Water').type="2" # draws water button 2
self.layout.operator("object.water_changer", text='Use Wavey Water').type="3" # draws water button 3
class OBJECT_OT_water_changer(bpy.types.Operator): # the object used for executing the buttons
bl_label = "Change Water Shader" # Used when pressing space on a viewport.
# Currently broken, as all the water type buttons go to one button.
bl_idname = "object.water_changer" # Used if another script wants to use this button
bl_description = "Change water shader" # Main text of the tool tip
    type = bpy.props.StringProperty() # Gets the type data set in OBJECT_PT_water_changer.draw()
def execute(self, context):
print("self:",self.type,"len",len(self.type))
print("selected object:",context.object)
        self.report({'INFO'}, "Set water to type "+self.type) # Shows an info message in Blender's
        # report/status area when the operator finishes.
global WATER_SHADER_TYPE # Allows WATER_SHADER_TYPE to be set globally
if self.type=="0":
print("setting to type 0")
WATER_SHADER_TYPE=0
elif self.type=="1":
print("setting to type 1")
WATER_SHADER_TYPE=1
elif self.type=="2":
print("setting to type 2")
WATER_SHADER_TYPE=2
elif self.type=="3":
print("setting to type 3")
WATER_SHADER_TYPE=3
# Sets WATER_SHADER_TYPE to something
main() # Runs the main script
return{'FINISHED'} # Required by Blender
def register():
bpy.utils.register_module(__name__) # Needed to register the custom GUI components
def unregister():
bpy.utils.unregister_module(__name__) # Needed to unregister the custom GUI components
### END OF GUI CODE
if __name__ == "__main__": # Standard python check to see if the code is being ran, or added as a module
print("\nStarted Cycles Mineways import script.\n")
main() # Runs the main script
#register() # Sets up the GUI
print("\nCycles Mineways has finished.\n")
| gpl-3.0 | -3,542,560,193,766,556,700 | 46.168871 | 238 | 0.695085 | false |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/rest_framework/throttling.py | 25 | 8143 | """
Provides various throttling policies.
"""
from __future__ import unicode_literals
import time
from django.core.cache import cache as default_cache
from django.core.exceptions import ImproperlyConfigured
from rest_framework.compat import is_authenticated
from rest_framework.settings import api_settings
class BaseThrottle(object):
"""
Rate throttling of requests.
"""
def allow_request(self, request, view):
"""
Return `True` if the request should be allowed, `False` otherwise.
"""
raise NotImplementedError('.allow_request() must be overridden')
def get_ident(self, request):
"""
Identify the machine making the request by parsing HTTP_X_FORWARDED_FOR
        if present and the number of proxies is > 0. Otherwise use all of
        HTTP_X_FORWARDED_FOR if it is available; if not, use REMOTE_ADDR.
"""
xff = request.META.get('HTTP_X_FORWARDED_FOR')
remote_addr = request.META.get('REMOTE_ADDR')
num_proxies = api_settings.NUM_PROXIES
if num_proxies is not None:
if num_proxies == 0 or xff is None:
return remote_addr
addrs = xff.split(',')
client_addr = addrs[-min(num_proxies, len(addrs))]
return client_addr.strip()
return ''.join(xff.split()) if xff else remote_addr
def wait(self):
"""
Optionally, return a recommended number of seconds to wait before
the next request.
"""
return None
class SimpleRateThrottle(BaseThrottle):
"""
A simple cache implementation, that only requires `.get_cache_key()`
to be overridden.
The rate (requests / seconds) is set by a `throttle` attribute on the View
class. The attribute is a string of the form 'number_of_requests/period'.
Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day')
Previous request information used for throttling is stored in the cache.
"""
cache = default_cache
timer = time.time
cache_format = 'throttle_%(scope)s_%(ident)s'
scope = None
THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES
def __init__(self):
if not getattr(self, 'rate', None):
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
def get_cache_key(self, request, view):
"""
Should return a unique cache-key which can be used for throttling.
Must be overridden.
May return `None` if the request should not be throttled.
"""
raise NotImplementedError('.get_cache_key() must be overridden')
def get_rate(self):
"""
Determine the string representation of the allowed request rate.
"""
if not getattr(self, 'scope', None):
msg = ("You must set either `.scope` or `.rate` for '%s' throttle" %
self.__class__.__name__)
raise ImproperlyConfigured(msg)
try:
return self.THROTTLE_RATES[self.scope]
except KeyError:
msg = "No default throttle rate set for '%s' scope" % self.scope
raise ImproperlyConfigured(msg)
def parse_rate(self, rate):
"""
Given the request rate string, return a two tuple of:
<allowed number of requests>, <period of time in seconds>
"""
if rate is None:
return (None, None)
num, period = rate.split('/')
num_requests = int(num)
duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]
return (num_requests, duration)
def allow_request(self, request, view):
"""
Implement the check to see if the request should be throttled.
On success calls `throttle_success`.
On failure calls `throttle_failure`.
"""
if self.rate is None:
return True
self.key = self.get_cache_key(request, view)
if self.key is None:
return True
self.history = self.cache.get(self.key, [])
self.now = self.timer()
# Drop any requests from the history which have now passed the
# throttle duration
while self.history and self.history[-1] <= self.now - self.duration:
self.history.pop()
if len(self.history) >= self.num_requests:
return self.throttle_failure()
return self.throttle_success()
def throttle_success(self):
"""
Inserts the current request's timestamp along with the key
into the cache.
"""
self.history.insert(0, self.now)
self.cache.set(self.key, self.history, self.duration)
return True
def throttle_failure(self):
"""
Called when a request to the API has failed due to throttling.
"""
return False
def wait(self):
"""
Returns the recommended next request time in seconds.
"""
if self.history:
remaining_duration = self.duration - (self.now - self.history[-1])
else:
remaining_duration = self.duration
available_requests = self.num_requests - len(self.history) + 1
if available_requests <= 0:
return None
return remaining_duration / float(available_requests)
class AnonRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls that may be made by a anonymous users.
The IP address of the request will be used as the unique cache key.
"""
scope = 'anon'
def get_cache_key(self, request, view):
if is_authenticated(request.user):
return None # Only throttle unauthenticated requests.
return self.cache_format % {
'scope': self.scope,
'ident': self.get_ident(request)
}
class UserRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls that may be made by a given user.
The user id will be used as a unique cache key if the user is
authenticated. For anonymous requests, the IP address of the request will
be used.
"""
scope = 'user'
def get_cache_key(self, request, view):
if is_authenticated(request.user):
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
class ScopedRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls by different amounts for various parts of
the API. Any view that has the `throttle_scope` property set will be
throttled. The unique cache key will be generated by concatenating the
user id of the request, and the scope of the view being accessed.
"""
scope_attr = 'throttle_scope'
def __init__(self):
# Override the usual SimpleRateThrottle, because we can't determine
# the rate until called by the view.
pass
def allow_request(self, request, view):
# We can only determine the scope once we're called by the view.
self.scope = getattr(view, self.scope_attr, None)
# If a view does not have a `throttle_scope` always allow the request
if not self.scope:
return True
# Determine the allowed request rate as we normally would during
# the `__init__` call.
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
# We can now proceed as normal.
return super(ScopedRateThrottle, self).allow_request(request, view)
def get_cache_key(self, request, view):
"""
If `view.throttle_scope` is not set, don't apply this throttle.
Otherwise generate the unique cache key by concatenating the user id
with the '.throttle_scope` property of the view.
"""
if is_authenticated(request.user):
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
| mit | -2,495,019,639,814,025,700 | 31.185771 | 80 | 0.608498 | false |
lyft/graphite-web | webapp/graphite/finders/standard.py | 30 | 4096 | import os
from os.path import isdir, isfile, join, basename
from django.conf import settings
from graphite.logger import log
from graphite.node import BranchNode, LeafNode
from graphite.readers import WhisperReader, GzippedWhisperReader, RRDReader
from graphite.util import find_escaped_pattern_fields
from . import fs_to_metric, get_real_metric_path, match_entries
class StandardFinder:
DATASOURCE_DELIMITER = '::RRD_DATASOURCE::'
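    # Internal marker used to carry an RRD datasource pattern along with a file
    # path yielded by _find_paths(); find_nodes() splits it back off when yielding nodes.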
def __init__(self, directories=None):
directories = directories or settings.STANDARD_DIRS
self.directories = directories
def find_nodes(self, query):
clean_pattern = query.pattern.replace('\\', '')
pattern_parts = clean_pattern.split('.')
for root_dir in self.directories:
for absolute_path in self._find_paths(root_dir, pattern_parts):
if basename(absolute_path).startswith('.'):
continue
if self.DATASOURCE_DELIMITER in basename(absolute_path):
(absolute_path, datasource_pattern) = absolute_path.rsplit(self.DATASOURCE_DELIMITER, 1)
else:
datasource_pattern = None
relative_path = absolute_path[ len(root_dir): ].lstrip('/')
metric_path = fs_to_metric(relative_path)
real_metric_path = get_real_metric_path(absolute_path, metric_path)
metric_path_parts = metric_path.split('.')
for field_index in find_escaped_pattern_fields(query.pattern):
metric_path_parts[field_index] = pattern_parts[field_index].replace('\\', '')
metric_path = '.'.join(metric_path_parts)
# Now we construct and yield an appropriate Node object
if isdir(absolute_path):
yield BranchNode(metric_path)
elif isfile(absolute_path):
if absolute_path.endswith('.wsp') and WhisperReader.supported:
reader = WhisperReader(absolute_path, real_metric_path)
yield LeafNode(metric_path, reader)
elif absolute_path.endswith('.wsp.gz') and GzippedWhisperReader.supported:
reader = GzippedWhisperReader(absolute_path, real_metric_path)
yield LeafNode(metric_path, reader)
elif absolute_path.endswith('.rrd') and RRDReader.supported:
if datasource_pattern is None:
yield BranchNode(metric_path)
else:
for datasource_name in RRDReader.get_datasources(absolute_path):
if match_entries([datasource_name], datasource_pattern):
reader = RRDReader(absolute_path, datasource_name)
yield LeafNode(metric_path + "." + datasource_name, reader)
def _find_paths(self, current_dir, patterns):
"""Recursively generates absolute paths whose components underneath current_dir
match the corresponding pattern in patterns"""
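        # e.g. for the query pattern "servers.web*.cpu", find_nodes() passes
        # patterns = ['servers', 'web*', 'cpu'] and each recursion consumes one part.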
pattern = patterns[0]
patterns = patterns[1:]
try:
entries = os.listdir(current_dir)
except OSError as e:
log.exception(e)
entries = []
subdirs = [entry for entry in entries if isdir(join(current_dir, entry))]
matching_subdirs = match_entries(subdirs, pattern)
if len(patterns) == 1 and RRDReader.supported: #the last pattern may apply to RRD data sources
files = [entry for entry in entries if isfile(join(current_dir, entry))]
rrd_files = match_entries(files, pattern + ".rrd")
if rrd_files: #let's assume it does
datasource_pattern = patterns[0]
for rrd_file in rrd_files:
absolute_path = join(current_dir, rrd_file)
yield absolute_path + self.DATASOURCE_DELIMITER + datasource_pattern
if patterns: #we've still got more directories to traverse
for subdir in matching_subdirs:
absolute_path = join(current_dir, subdir)
for match in self._find_paths(absolute_path, patterns):
yield match
else: #we've got the last pattern
files = [entry for entry in entries if isfile(join(current_dir, entry))]
matching_files = match_entries(files, pattern + '.*')
for base_name in matching_files + matching_subdirs:
yield join(current_dir, base_name)
| apache-2.0 | 1,509,728,894,003,232,500 | 38.76699 | 98 | 0.669434 | false |
oceanobservatories/mi-instrument | mi/platform/rsn/test/test_oms_client.py | 9 | 1674 | # #!/usr/bin/env python
#
# """
# @package ion.agents.platform.rsn.test.test_oms_client
# @file ion/agents/platform/rsn/test/test_oms_client.py
# @author Carlos Rueda
# @brief Test cases for CIOMSClient. The OMS enviroment variable can be used
# to indicate which CIOMSClient will be tested.
# """
#
# __author__ = 'Carlos Rueda'
# __license__ = 'Apache 2.0'
#
# from pyon.public import log
# from ion.agents.platform.rsn.simulator.logger import Logger
# Logger.set_logger(log)
#
# from pyon.util.int_test import IonIntegrationTestCase
#
# from ion.agents.platform.rsn.oms_client_factory import CIOMSClientFactory
# from ion.agents.platform.rsn.test.oms_test_mixin import OmsTestMixin
#
# from nose.plugins.attrib import attr
#
# import os
#
#
# @attr('INT', group='sa')
# class Test(IonIntegrationTestCase, OmsTestMixin):
# """
# The OMS enviroment variable can be used to indicate which CIOMSClient will
# be tested. By default, it tests against the simulator, which is launched
# as an external process.
# """
#
# @classmethod
# def setUpClass(cls):
# OmsTestMixin.setUpClass()
# if cls.using_actual_rsn_oms_endpoint():
# # use FQDM for local host if testing against actual RSN OMS:
# cls._use_fqdn_for_event_listener = True
#
# def setUp(self):
# oms_uri = os.getenv('OMS', "launchsimulator")
# oms_uri = self._dispatch_simulator(oms_uri)
# log.debug("oms_uri = %s", oms_uri)
# self.oms = CIOMSClientFactory.create_instance(oms_uri)
#
# def done():
# CIOMSClientFactory.destroy_instance(self.oms)
#
# self.addCleanup(done)
| bsd-2-clause | 1,153,583,282,259,573,200 | 31.192308 | 80 | 0.66368 | false |
dataxu/ansible | lib/ansible/modules/cloud/openstack/os_keypair.py | 5 | 4722 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
# Copyright (c) 2013, John Dewey <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove key pair from OpenStack
options:
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected into VMs
upon creation.
required: false
default: None
public_key_file:
description:
- Path to local file containing ssh public key. Mutually exclusive
with public_key.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: []
'''
EXAMPLES = '''
# Creates a key pair with the running users public key
- os_keypair:
cloud: mordred
state: present
name: ansible_key
public_key_file: /home/me/.ssh/id_rsa.pub
# Creates a new key pair; the private key is returned after the run.
- os_keypair:
cloud: rax-dfw
state: present
name: ansible_key
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the keypair.
returned: success
type: string
public_key:
description: The public key value for the keypair.
returned: success
type: string
private_key:
description: The private key value for the keypair.
returned: Only when a keypair is generated for the user (e.g., when creating one
and a public key is not specified).
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(module, keypair):
state = module.params['state']
if state == 'present' and not keypair:
return True
if state == 'absent' and keypair:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
public_key=dict(default=None),
public_key_file=dict(default=None),
state=dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[['public_key', 'public_key_file']])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
name = module.params['name']
public_key = module.params['public_key']
if module.params['public_key_file']:
public_key = open(module.params['public_key_file']).read()
public_key = public_key.rstrip()
shade, cloud = openstack_cloud_from_module(module)
try:
keypair = cloud.get_keypair(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, keypair))
if state == 'present':
if keypair and keypair['name'] == name:
if public_key and (public_key != keypair['public_key']):
module.fail_json(
msg="Key name %s present but key hash not the same"
" as offered. Delete key first." % name
)
else:
changed = False
else:
keypair = cloud.create_keypair(name, public_key)
changed = True
module.exit_json(changed=changed,
key=keypair,
id=keypair['id'])
elif state == 'absent':
if keypair:
cloud.delete_keypair(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -8,686,226,670,674,974,000 | 27.969325 | 125 | 0.613088 | false |
yaojenkuo/BuildingMachineLearningSystemsWithPython | ch03/rel_post_20news.py | 24 | 3903 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
import scipy as sp
new_post = \
"""Disk drive problems. Hi, I have a problem with my hard disk.
After 1 year it is working only sporadically now.
I tried to format it, but now it doesn't boot any more.
Any ideas? Thanks.
"""
print("""\
Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'!
For the 2nd edition we introduced a couple of changes that will result into
results that differ from the results in the 1st edition.
E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring
you to download the data manually from MLCOMP.
If you have any questions, please ask at http://www.twotoreal.com
""")
all_data = sklearn.datasets.fetch_20newsgroups(subset="all")
print("Number of total posts: %i" % len(all_data.filenames))
# Number of total posts: 18846
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
print("Number of training posts in tech groups:", len(train_data.filenames))
# Number of training posts in tech groups: 3529
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
num_samples, num_features = vectorized.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
# samples: 3529, #features: 4712
from sklearn.cluster import KMeans
km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3)
clustered = km.fit(vectorized)
print("km.labels_=%s" % km.labels_)
# km.labels_=[ 6 34 22 ..., 2 21 26]
print("km.labels_.shape=%s" % km.labels_.shape)
# km.labels_.shape=3529
from sklearn import metrics
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
# Homogeneity: 0.400
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
# Completeness: 0.206
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
# V-measure: 0.272
print("Adjusted Rand Index: %0.3f" %
metrics.adjusted_rand_score(labels, km.labels_))
# Adjusted Rand Index: 0.064
print("Adjusted Mutual Information: %0.3f" %
metrics.adjusted_mutual_info_score(labels, km.labels_))
# Adjusted Mutual Information: 0.197
print(("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(vectorized, labels, sample_size=1000)))
# Silhouette Coefficient: 0.006
new_post_vec = vectorizer.transform([new_post])
new_post_label = km.predict(new_post_vec)[0]
similar_indices = (km.labels_ == new_post_label).nonzero()[0]
similar = []
for i in similar_indices:
dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())
similar.append((dist, train_data.data[i]))
similar = sorted(similar)
print("Count similar: %i" % len(similar))
show_at_1 = similar[0]
show_at_2 = similar[int(len(similar) / 10)]
show_at_3 = similar[int(len(similar) / 2)]
print("=== #1 ===")
print(show_at_1)
print()
print("=== #2 ===")
print(show_at_2)
print()
print("=== #3 ===")
print(show_at_3)
| mit | 4,443,673,887,272,943,600 | 32.358974 | 82 | 0.698437 | false |
hef/samba | python/samba/netcmd/spn.py | 46 | 7603 | # spn management
#
# Copyright Matthieu Patou [email protected] 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import ldb
from samba import provision
from samba.samdb import SamDB
from samba.auth import system_session
from samba.netcmd.common import _get_user_realm_domain
from samba.netcmd import (
Command,
CommandError,
SuperCommand,
Option
)
class cmd_spn_list(Command):
"""List spns of a given user."""
synopsis = "%prog <user> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["user"]
def run(self, user, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to nail down
        # the correct domain
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
self.outf.write(cleaneduser+"\n")
res = sam.search(
expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
        if len(res) > 0:
spns = res[0].get("servicePrincipalName")
found = False
flag = ldb.FLAG_MOD_ADD
if spns is not None:
self.outf.write(
"User %s has the following servicePrincipalName: \n" %
res[0].dn)
for e in spns:
self.outf.write("\t %s\n" % e)
else:
self.outf.write("User %s has no servicePrincipalName" %
res[0].dn)
else:
raise CommandError("User %s not found" % user)
class cmd_spn_add(Command):
"""Create a new spn."""
synopsis = "%prog <name> <user> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--force", help="Force the addition of the spn"
" even it exists already", action="store_true"),
]
takes_args = ["name", "user"]
def run(self, name, user, force=False, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
res = sam.search(
expression="servicePrincipalName=%s" % ldb.binary_encode(name),
scope=ldb.SCOPE_SUBTREE)
if len(res) != 0 and not force:
raise CommandError("Service principal %s already"
" affected to another user" % name)
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
res = sam.search(
expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
        if len(res) > 0:
res[0].dn
msg = ldb.Message()
spns = res[0].get("servicePrincipalName")
tab = []
found = False
flag = ldb.FLAG_MOD_ADD
if spns is not None:
for e in spns:
if str(e) == name:
found = True
tab.append(str(e))
flag = ldb.FLAG_MOD_REPLACE
tab.append(name)
msg.dn = res[0].dn
msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
"servicePrincipalName")
if not found:
sam.modify(msg)
else:
raise CommandError("Service principal %s already"
" affected to %s" % (name, user))
else:
raise CommandError("User %s not found" % user)
class cmd_spn_delete(Command):
"""Delete a spn."""
synopsis = "%prog <name> [user] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["name", "user?"]
def run(self, name, user=None, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
res = sam.search(
expression="servicePrincipalName=%s" % ldb.binary_encode(name),
scope=ldb.SCOPE_SUBTREE,
attrs=["servicePrincipalName", "samAccountName"])
        if len(res) > 0:
result = None
if user is not None:
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
for elem in res:
if str(elem["samAccountName"]).lower() == cleaneduser:
result = elem
if result is None:
raise CommandError("Unable to find user %s with"
" spn %s" % (user, name))
else:
if len(res) != 1:
listUser = ""
for r in res:
listUser = "%s\n%s" % (listUser, str(r.dn))
raise CommandError("More than one user has the spn %s "
"and no specific user was specified, list of users"
" with this spn:%s" % (name, listUser))
else:
result=res[0]
msg = ldb.Message()
spns = result.get("servicePrincipalName")
tab = []
if spns is not None:
for e in spns:
if str(e) != name:
tab.append(str(e))
flag = ldb.FLAG_MOD_REPLACE
msg.dn = result.dn
msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
"servicePrincipalName")
sam.modify(msg)
else:
raise CommandError("Service principal %s not affected" % name)
class cmd_spn(SuperCommand):
"""Service Principal Name (SPN) management."""
subcommands = {}
subcommands["add"] = cmd_spn_add()
subcommands["list"] = cmd_spn_list()
subcommands["delete"] = cmd_spn_delete()
| gpl-3.0 | -7,702,153,048,084,918,000 | 36.087805 | 79 | 0.549257 | false |
ace02000/pyload | module/plugins/accounts/FastshareCz.py | 1 | 1470 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class FastshareCz(Account):
__name__ = "FastshareCz"
__type__ = "account"
__version__ = "0.11"
__status__ = "testing"
__description__ = """Fastshare.cz account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("stickell", "[email protected]")]
CREDIT_PATTERN = r'Credit\s*:\s*</td>\s*<td>(.+?)\s*<'
def grab_info(self, user, password, data):
validuntil = -1
trafficleft = None
premium = False
html = self.load("http://www.fastshare.cz/user")
m = re.search(self.CREDIT_PATTERN, html)
if m:
trafficleft = self.parse_traffic(m.group(1))
premium = bool(trafficleft)
return {'validuntil' : validuntil,
'trafficleft': trafficleft,
'premium' : premium}
def signin(self, user, password, data):
set_cookie(self.req.cj, "fastshare.cz", "lang", "en")
self.load('http://www.fastshare.cz/login') #@NOTE: Do not remove or it will not login
html = self.load("https://www.fastshare.cz/sql.php",
post={'login': user,
'heslo': password})
if ">Wrong username or password" in html:
self.fail_login()
| gpl-3.0 | -8,618,165,420,933,813,000 | 27.269231 | 94 | 0.543537 | false |
bowlofstew/common | api/biiapi.py | 5 | 2554 | from abc import ABCMeta, abstractmethod
from biicode.common.model.symbolic.reference import References
from biicode.common.edition.block_holder import BlockHolder
class BiiAPI(object):
'''The main interface to user-access biicode published information'''
#TODO: Clearly specify raised Exceptions in each method
#TODO: Validate implementations, to check that they really follow this specification
__metaclass__ = ABCMeta
def require_auth(self):
"""Require a logged username"""
raise NotImplementedError()
@abstractmethod
def get_dep_table(self, block_version):
"""
return: BlockVersionTable
"""
raise NotImplementedError()
@abstractmethod
def get_published_resources(self, references):
"""
param references: References
return: ReferencedResources
"""
raise NotImplementedError()
@abstractmethod
def get_cells_snapshot(self, block_version):
"""
return: [CellName] of the cells corresponding to such block_version
"""
raise NotImplementedError()
def get_block_holder(self, block_version):
""""
return: BlockHolder
"""
assert block_version.time is not None
refs = References()
block_cells_name = self.get_cells_snapshot(block_version)
refs[block_version] = set(block_cells_name)
resources = self.get_published_resources(refs)
return BlockHolder(block_version.block_name, resources[block_version])
@abstractmethod
def get_renames(self, brl_block, t1, t2):
'''return a Renames object (i.e. a dict{oldName:newName}'''
raise NotImplementedError()
@abstractmethod
def publish(self, publish_request):
raise NotImplementedError()
@abstractmethod
def get_version_delta_info(self, block_version):
raise NotImplementedError()
@abstractmethod
def get_version_by_tag(self, brl_block, version_tag):
raise NotImplementedError()
@abstractmethod
def get_block_info(self, brl_block):
raise NotImplementedError()
@abstractmethod
def find(self, finder_request, response):
'''Finder and updater
return a FinderResult'''
raise NotImplementedError()
@abstractmethod
def get_server_info(self):
''' Gets the server info ServerInfo object'''
raise NotImplementedError()
@abstractmethod
def authenticate(self):
''' Gets the token'''
raise NotImplementedError()
| mit | 2,509,508,830,543,154,000 | 29.404762 | 88 | 0.659358 | false |
gbriones1/cloud-init | cloudinit/sources/DataSourceNone.py | 15 | 1846 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import log as logging
from cloudinit import sources
LOG = logging.getLogger(__name__)
class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
self.userdata_raw = ''
def get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
if 'userdata_raw' in self.ds_cfg:
self.userdata_raw = self.ds_cfg['userdata_raw']
if 'metadata' in self.ds_cfg:
self.metadata = self.ds_cfg['metadata']
return True
def get_instance_id(self):
return 'iid-datasource-none'
@property
def is_disconnected(self):
return True
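# Illustrative configuration sketch (the "datasource"/"None" nesting is an
# assumption about how ds_cfg is populated; the userdata_raw/metadata keys match
# the lookups in get_data above):
#
# datasource:
#   None:
#     userdata_raw: |
#       #cloud-config
#       runcmd: [ 'echo datasource-none' ]
#     metadata:
#       instance-id: iid-datasource-none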
# Used to match classes to dependencies
datasources = [
(DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
(DataSourceNone, []),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
| gpl-3.0 | -1,857,325,352,713,332,700 | 31.385965 | 74 | 0.680932 | false |
HerkCoin/herkcoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
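# Illustrative round trip for parse_po() on a hypothetical xgettext snippet:
#
#   parse_po('msgid "Usage:"\nmsgstr ""\n\nmsgid ""\nmsgstr ""\n')
#   # -> [(['"Usage:"'], ['""']), (['""'], ['""'])]
#
# Each tuple keeps the raw quoted lines so they can be re-emitted verbatim below.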
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit | -3,160,543,746,266,249,700 | 24.611111 | 80 | 0.571584 | false |
jhonatajh/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-opt-icf.py | 344 | 1319 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure comdat folding optimization setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('opt-icf.gyp', chdir=CHDIR)
test.build('opt-icf.gyp', chdir=CHDIR)
# We're specifying /DEBUG so the default is to not merge identical
# functions, so all of the similar_functions should be preserved.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_default.exe', chdir=CHDIR))
if output.count('similar_function') != 6: # 3 definitions, 3 calls.
test.fail_test()
  # Explicitly off, all functions preserved separately.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_no.exe', chdir=CHDIR))
if output.count('similar_function') != 6: # 3 definitions, 3 calls.
test.fail_test()
# Explicitly on, all but one removed.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_yes.exe', chdir=CHDIR))
if output.count('similar_function') != 4: # 1 definition, 3 calls.
test.fail_test()
test.pass_test()
| gpl-3.0 | -3,098,525,694,876,766,700 | 31.170732 | 78 | 0.691433 | false |
locationtech/geowave | python/src/main/python/pygw/store/accumulo/accumulo_options.py | 2 | 2877 | #
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.config import geowave_pkg
from pygw.store import DataStoreOptions
class AccumuloOptions(DataStoreOptions):
"""
Accumulo data store options.
"""
def __init__(self):
super().__init__(geowave_pkg.datastore.accumulo.config.AccumuloRequiredOptions())
def set_zookeeper(self, zookeeper):
"""
        Sets the list of Zookeeper servers that the Accumulo instance uses as a comma-separated
string.
Args:
zookeeper (str): A comma-separated list of Zookeeper servers.
"""
self._java_ref.setZookeeper(zookeeper)
def get_zookeeper(self):
"""
Returns:
            A comma-separated list of Zookeeper servers.
"""
return self._java_ref.getZookeeper()
def set_instance(self, instance):
"""
Sets the Accumulo instance ID to use for the data store.
Args:
instance (str): The Accumulo instance ID to use.
"""
self._java_ref.setInstance(instance)
def get_instance(self):
"""
Returns:
The Accumulo instance ID.
"""
return self._java_ref.getInstance()
def set_user(self, user):
"""
Sets the Accumulo user ID.
Args:
user (str): The Accumulo user ID.
"""
self._java_ref.setUser(user)
def get_user(self):
"""
Returns:
The Accumulo user ID.
"""
return self._java_ref.getUser()
def set_password(self, password):
"""
Sets the Accumulo password.
Args:
password (str): The Accumulo password.
"""
self._java_ref.setPassword(password)
def get_password(self):
"""
Returns:
The Accumulo password.
"""
return self._java_ref.getPassword()
def set_use_locality_groups(self, use_locality_groups):
"""
Sets whether or not to use locality groups.
Args:
use_locality_groups (bool): Whether or not to use locality groups.
"""
self._base_options.setUseLocalityGroups(use_locality_groups)
def is_use_locality_groups(self):
"""
Returns:
True if locality groups are enabled, False otherwise.
"""
return self._base_options.isUseLocalityGroups()
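# Illustrative usage (connection values are placeholders; the resulting options
# object would typically be handed to whatever store factory the surrounding
# library expects):
#
#   options = AccumuloOptions()
#   options.set_zookeeper("zk1.example.com:2181,zk2.example.com:2181")
#   options.set_instance("accumulo")
#   options.set_user("root")
#   options.set_password("secret")
#   options.set_use_locality_groups(True)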
| apache-2.0 | 8,841,141,749,998,820,000 | 27.205882 | 97 | 0.582551 | false |
matthewrudy/kubernetes | cluster/juju/layers/kubernetes/reactive/k8s.py | 53 | 14370 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shlex import split
from shutil import copy2
from subprocess import check_call
from charms.docker.compose import Compose
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import is_leader
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.host import chdir
from contextlib import contextmanager
@hook('config-changed')
def config_changed():
'''If the configuration values change, remove the available states.'''
config = hookenv.config()
if any(config.changed(key) for key in config.keys()):
hookenv.log('Configuration options have changed.')
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
hookenv.log('Removing kubelet container and kubelet.available state.')
# Stop and remove the Kubernetes kubelet container..
compose.kill('kubelet')
compose.rm('kubelet')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
hookenv.log('Removing proxy container and proxy.available state.')
# Stop and remove the Kubernetes proxy container.
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting proxy.
remove_state('proxy.available')
if config.changed('version'):
hookenv.log('Removing kubectl.downloaded state so the new version'
' of kubectl will be downloaded.')
remove_state('kubectl.downloaded')
@when('tls.server.certificate available')
@when_not('k8s.server.certificate available')
def server_cert():
'''When the server certificate is available, get the server certificate from
the charm unit data and write it to the proper directory. '''
destination_directory = '/srv/kubernetes'
# Save the server certificate from unitdata to /srv/kubernetes/server.crt
save_certificate(destination_directory, 'server')
# Copy the unitname.key to /srv/kubernetes/server.key
copy_key(destination_directory, 'server')
set_state('k8s.server.certificate available')
@when('tls.client.certificate available')
@when_not('k8s.client.certficate available')
def client_cert():
'''When the client certificate is available, get the client certificate
from the charm unitdata and write it to the proper directory. '''
destination_directory = '/srv/kubernetes'
if not os.path.isdir(destination_directory):
os.makedirs(destination_directory)
os.chmod(destination_directory, 0o770)
# The client certificate is also available on charm unitdata.
client_cert_path = 'easy-rsa/easyrsa3/pki/issued/client.crt'
kube_cert_path = os.path.join(destination_directory, 'client.crt')
if os.path.isfile(client_cert_path):
# Copy the client.crt to /srv/kubernetes/client.crt
copy2(client_cert_path, kube_cert_path)
# The client key is only available on the leader.
client_key_path = 'easy-rsa/easyrsa3/pki/private/client.key'
kube_key_path = os.path.join(destination_directory, 'client.key')
if os.path.isfile(client_key_path):
# Copy the client.key to /srv/kubernetes/client.key
copy2(client_key_path, kube_key_path)
@when('tls.certificate.authority available')
@when_not('k8s.certificate.authority available')
def ca():
'''When the Certificate Authority is available, copy the CA from the
/usr/local/share/ca-certificates/k8s.crt to the proper directory. '''
# Ensure the /srv/kubernetes directory exists.
directory = '/srv/kubernetes'
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Normally the CA is just on the leader, but the tls layer installs the
# CA on all systems in the /usr/local/share/ca-certificates directory.
ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(
hookenv.service_name())
# The CA should be copied to the destination directory and named 'ca.crt'.
destination_ca_path = os.path.join(directory, 'ca.crt')
if os.path.isfile(ca_path):
copy2(ca_path, destination_ca_path)
set_state('k8s.certificate.authority available')
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
def final_messaging():
'''Lower layers emit messages, and if we do not clear the status messaging
queue, we are left with whatever the last method call sets status to. '''
# It's good UX to have consistent messaging that the cluster is online
if is_leader():
status_set('active', 'Kubernetes leader running')
else:
status_set('active', 'Kubernetes follower running')
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
@when_not('skydns.available')
def launch_skydns():
'''Create a kubernetes service and resource controller for the skydns
service. '''
# Only launch and track this state on the leader.
# Launching duplicate SkyDNS rc will raise an error
if not is_leader():
return
cmd = "kubectl create -f files/manifests/skydns-rc.yml"
check_call(split(cmd))
cmd = "kubectl create -f files/manifests/skydns-svc.yml"
check_call(split(cmd))
set_state('skydns.available')
@when('docker.available')
@when_not('etcd.available')
def relation_message():
'''Take over messaging to let the user know they are pending a relationship
to the ETCD cluster before going any further. '''
status_set('waiting', 'Waiting for relation to ETCD')
@when('etcd.available', 'tls.server.certificate available')
@when_not('kubelet.available', 'proxy.available')
def master(etcd):
'''Install and run the hyperkube container that starts kubernetes-master.
This actually runs the kubelet, which in turn runs a pod that contains the
other master components. '''
render_files(etcd)
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
status_set('maintenance', 'Starting the Kubernetes kubelet container.')
# Start the Kubernetes kubelet container using docker-compose.
compose.up('kubelet')
set_state('kubelet.available')
# Open the secure port for api-server.
hookenv.open_port(6443)
status_set('maintenance', 'Starting the Kubernetes proxy container')
# Start the Kubernetes proxy container using docker-compose.
compose.up('proxy')
set_state('proxy.available')
status_set('active', 'Kubernetes started')
@when('proxy.available')
@when_not('kubectl.downloaded')
def download_kubectl():
'''Download the kubectl binary to test and interact with the cluster.'''
status_set('maintenance', 'Downloading the kubectl binary')
version = hookenv.config()['version']
cmd = 'wget -nv -O /usr/local/bin/kubectl https://storage.googleapis.com/' \
'kubernetes-release/release/{0}/bin/linux/amd64/kubectl'
cmd = cmd.format(version)
    hookenv.log('Downloading kubectl: {0}'.format(cmd))
check_call(split(cmd))
cmd = 'chmod +x /usr/local/bin/kubectl'
check_call(split(cmd))
set_state('kubectl.downloaded')
status_set('active', 'Kubernetes installed')
@when('kubectl.downloaded')
@when_not('kubectl.package.created')
def package_kubectl():
'''Package the kubectl binary and configuration to a tar file for users
to consume and interact directly with Kubernetes.'''
if not is_leader():
return
context = 'default-context'
cluster_name = 'kubernetes'
public_address = hookenv.unit_public_ip()
directory = '/srv/kubernetes'
key = 'client.key'
ca = 'ca.crt'
cert = 'client.crt'
user = 'ubuntu'
port = '6443'
with chdir(directory):
# Create the config file with the external address for this server.
cmd = 'kubectl config set-cluster --kubeconfig={0}/config {1} ' \
'--server=https://{2}:{3} --certificate-authority={4}'
check_call(split(cmd.format(directory, cluster_name, public_address,
port, ca)))
# Create the credentials.
cmd = 'kubectl config set-credentials --kubeconfig={0}/config {1} ' \
'--client-key={2} --client-certificate={3}'
check_call(split(cmd.format(directory, user, key, cert)))
# Create a default context with the cluster.
cmd = 'kubectl config set-context --kubeconfig={0}/config {1}' \
' --cluster={2} --user={3}'
check_call(split(cmd.format(directory, context, cluster_name, user)))
# Now make the config use this new context.
cmd = 'kubectl config use-context --kubeconfig={0}/config {1}'
check_call(split(cmd.format(directory, context)))
# Copy the kubectl binary to this directory
cmd = 'cp -v /usr/local/bin/kubectl {0}'.format(directory)
check_call(split(cmd))
# Create an archive with all the necessary files.
cmd = 'tar -cvzf /home/ubuntu/kubectl_package.tar.gz ca.crt client.crt client.key config kubectl' # noqa
check_call(split(cmd))
set_state('kubectl.package.created')
@when('proxy.available')
@when_not('cadvisor.available')
def start_cadvisor():
'''Start the cAdvisor container that gives metrics about the other
application containers on this system. '''
compose = Compose('files/kubernetes')
compose.up('cadvisor')
set_state('cadvisor.available')
status_set('active', 'cadvisor running on port 8088')
hookenv.open_port(8088)
@when('sdn.available')
def gather_sdn_data():
'''Get the Software Defined Network (SDN) information and return it as a
dictionary.'''
# SDN Providers pass data via the unitdata.kv module
db = unitdata.kv()
# Generate an IP address for the DNS provider
subnet = db.get('sdn_subnet')
if subnet:
ip = subnet.split('/')[0]
dns_server = '.'.join(ip.split('.')[0:-1]) + '.10'
addedcontext = {}
addedcontext['dns_server'] = dns_server
return addedcontext
return {}
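# Illustrative result: if the SDN layer stored sdn_subnet='10.1.0.0/16', the
# derived DNS address is '10.1.0.10', so this returns {'dns_server': '10.1.0.10'}.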
def copy_key(directory, prefix):
'''Copy the key from the easy-rsa/easyrsa3/pki/private directory to the
specified directory. '''
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Must remove the path characters from the local unit name.
path_name = hookenv.local_unit().replace('/', '_')
# The key is not in unitdata it is in the local easy-rsa directory.
local_key_path = 'easy-rsa/easyrsa3/pki/private/{0}.key'.format(path_name)
key_name = '{0}.key'.format(prefix)
# The key should be copied to this directory.
destination_key_path = os.path.join(directory, key_name)
# Copy the key file from the local directory to the destination.
copy2(local_key_path, destination_key_path)
def render_files(reldata=None):
'''Use jinja templating to render the docker-compose.yml and master.json
file to contain the dynamic data for the configuration files.'''
context = {}
# Load the context manager with sdn and config data.
context.update(gather_sdn_data())
context.update(hookenv.config())
if reldata:
context.update({'connection_string': reldata.connection_string()})
charm_dir = hookenv.charm_dir()
rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
if not os.path.exists(rendered_kube_dir):
os.makedirs(rendered_kube_dir)
rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
if not os.path.exists(rendered_manifest_dir):
os.makedirs(rendered_manifest_dir)
# Add the manifest directory so the docker-compose file can have.
context.update({'manifest_directory': rendered_manifest_dir,
'private_address': hookenv.unit_get('private-address')})
# Render the files/kubernetes/docker-compose.yml file that contains the
# definition for kubelet and proxy.
target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
render('docker-compose.yml', target, context)
# Render the files/manifests/master.json that contains parameters for the
# apiserver, controller, and controller-manager
target = os.path.join(rendered_manifest_dir, 'master.json')
render('master.json', target, context)
# Render files/kubernetes/skydns-svc.yaml for SkyDNS service
target = os.path.join(rendered_manifest_dir, 'skydns-svc.yml')
render('skydns-svc.yml', target, context)
# Render files/kubernetes/skydns-rc.yaml for SkyDNS pods
target = os.path.join(rendered_manifest_dir, 'skydns-rc.yml')
render('skydns-rc.yml', target, context)
def save_certificate(directory, prefix):
'''Get the certificate from the charm unitdata, and write it to the proper
directory. The parameters are: destination directory, and prefix to use
for the key and certificate name.'''
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Grab the unitdata key value store.
store = unitdata.kv()
certificate_data = store.get('tls.{0}.certificate'.format(prefix))
certificate_name = '{0}.crt'.format(prefix)
# The certificate should be saved to this directory.
certificate_path = os.path.join(directory, certificate_name)
# write the server certificate out to the correct location
with open(certificate_path, 'w') as fp:
fp.write(certificate_data)
| apache-2.0 | -5,711,763,184,718,239,000 | 41.64095 | 113 | 0.693946 | false |
jephianlin/minrank_aux | xi_dict.py | 1 | 6449 | print("---SAPreduced_matrix, has_SAP, find_ZFloor, Zsap, etc.")
def SAPmatrix(A):
"""
Input: a symmetric matrix A
Output: The matrix for checking if A has SAP
"""
if A.is_symmetric()==False:
raise ValueError("Input matrix is not symmetric.")
AA=[];
n=A.dimensions()[0];
row_num=0;
for i in range(n):
for j in range(n):
AA.append([0]*(n*n));
if A[i][j]!=0 or i==j:
AA[row_num][i*n+j]=1;
if A[i][j]==0 and i!=j:
AA[row_num][i*n+j]=1;
AA[row_num][j*n+i]=-1;
row_num+=1;
BB=identity_matrix(n).tensor_product(A);
for row in BB.rows():
AA.append(row);
return matrix(AA);
def SAPreduced_matrix(A):
"""
Input: a symmetric matrix A
Output: the reduced matrix for checking if A has SAP
"""
if A.is_symmetric()==False:
raise ValueError("Input matrix is not symmetric.")
AA=[];
n=A.dimensions()[0];
nonedge=0;
for i in range(n):
for j in range(i+1,n):
if A[i][j]==0:
AA.append([0]*(n*n));
i_start=i*n;
j_start=j*n;
for k in range(n):
AA[nonedge][i_start+k]=A[j][k];
AA[nonedge][j_start+k]=A[i][k];
nonedge+=1;
return matrix(AA).transpose();
def has_SAP(A):
"""
Input: a symmetric matrix A
Output: True if A has Strong Arnold Property; False if A does not.
"""
##SAPreduced_matrix is faster than SAPmatrix
##AA=SAPmatrix(A);
##if AA.rank()==AA.dimensions()[1]:
AA=SAPreduced_matrix(A);
if AA.rank()==AA.dimensions()[1]:
return True;
else:
return False;
def ful_annihilator(A):
"""
Input: a symmetric matrix A
    Output: 0 if A has SAP; otherwise a basis of the space of full annihilators of A;
"""
n=A.dimensions()[0];
AA=SAPmatrix(A);
ker=AA.right_kernel();
if ker.dimension()==0:
return 0;
else:
basis=[];
for v in ker.basis():
list_v=list(v);
basis.append(matrix(n,n,list_v));
return basis;
def ZFloor_game(g,done,act,token,chron=False):
"""
g: considered graph
    done: list of blue vertices that can no longer move (tokens are taken)
    act: list of active blue vertices
    token: integer of available tokens
    Output: True if it is a ZFloor forcing set; False if not. To see the chron list, set chron=True.
"""
##for graphs and lists, we need to make a copy.
h=g.copy()
this_done=[];
this_act=[];
for v in done:
h.delete_vertex(v);
this_done.append(v);
for v in act:
this_act.append(v);
##Do conventional CRC as possible, and collect tokens.
##delete every edges between this_act.
for u,w in Combinations(this_act,2):
h.delete_edge(u,w);
again=True;
while again:
again=False;
for v in this_act:
if h.degree(v)==1:
u=h.neighbors(v)[0];
this_act.append(u);
this_act.remove(v);
this_done.append(v);
h.delete_vertex(v);
for w in this_act:
h.delete_edge(u,w);
again=True;
break;
if h.degree(v)==0:
token+=1;
this_act.remove(v);
this_done.append(v);
h.delete_vertex(v);
again=True;
if h.order()==0:
return True;
if h.order()!=0 and token==0:
return False;
##Find white set
white=h.vertices();
for v in this_act:
white.remove(v);
##Do recursion.
if token>=len(white):
return True;
else:
for new_act in Combinations(white,token):
if ZFloor_game(g,this_done,this_act+new_act,0)==True:
return True;
return False;
def find_ZFloor(g):
"""
Input: a simple graph g
Output: the ZFloor of g
"""
ZF=g.order()-1;
if ZF<0:
return ZF+1;
try_lower=True;
while try_lower:
try_lower=False;
if ZFloor_game(g,[],[],ZF)==True:
try_lower=True;
ZF+=-1;
return ZF+1;
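# Illustrative usage (Sage session; the returned values depend on the graph and
# are not asserted here):
#   g = graphs.CycleGraph(5)
#   find_ZFloor(g)              # the ZFloor parameter of g
#   ZFloor_game(g, [], [], 2)   # can 2 tokens with no initial blue vertices force g?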
T3FamilyString=['C~', 'DFw','EBnW','F@QZo','G?Gisg','H??@qiK']
T3Family=[Graph(stg) for stg in T3FamilyString];
def xi_ubd(g):
C=g.connected_components_subgraphs();
if len(C)==1:
ubd=find_ZFloor(g);
e=g.size();
if g.is_bipartite():
#print "bipartite"
ubd=min(ubd,int(-0.5+sqrt(2.25+2*e)));
else:
#print "not bipartite"
ubd=min(ubd,int(-0.5+sqrt(0.25+2*e)));
if g.is_tree():
#print "tree"
ubd=min(ubd,2);
return ubd;
else:
ubd=0;
for com in C:
ubd=max(ubd,xi_ubd(com));
return ubd;
def xi_lbd(g):
###SUPER long...
lbd=1;
if g.is_forest()==False or max(g.degree_sequence())>=3:
lbd=2;
for t in T3Family:
if has_minor(g,t):
return 3;
return lbd;
##This function requires the gzerosgame and find_gZ functions in oc_diag_analysis.sage
def SAPreduced_mr(g,non_singular=False):
n=g.order();
A=g.adjacency_matrix()-identity_matrix(n);
AA=SAPreduced_matrix(A);
##rows should be n^2; cols should be number of nonedges
rows,cols=AA.dimensions();
##Set X as -1~-rows and Y as 1~cols
X=[];
for i in range(1,rows+1):
X.append(-i);
Y=[];
for i in range(1,cols+1):
Y.append(i);
##NOTE: the labeling of graphs start at 1 and -1, but not 0
## but the labeling of the matrix start at 0 for both rows and columns
SAP_g=Graph(0);
SAP_g.add_vertices(X);
SAP_g.add_vertices(Y);
##setting edges and banned set
B=[];
for i in range(rows):
for j in range(cols):
if AA[i][j]!=0:
SAP_g.add_edge(-i-1,j+1);
if AA[i][j]==-1:
B.append((-i-1,j+1));
##For debug
#show(AA);
#show(SAP_g);
#print B;
if non_singular==False:
return rows+cols-find_gZ(SAP_g, X, B);
if non_singular==True:
#print gzerosgame(SAP_g, X, B);
return len(gzerosgame(SAP_g, X, B))==rows+cols;
| gpl-2.0 | -916,061,750,009,489,300 | 27.662222 | 95 | 0.509846 | false |
Lh4cKg/sl4a | python/src/Lib/test/test___all__.py | 52 | 6085 | import unittest
from test.test_support import run_unittest
import sys
import warnings
class AllTest(unittest.TestCase):
def check_all(self, modname):
names = {}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".* (module|package)",
DeprecationWarning)
try:
exec "import %s" % modname in names
except ImportError:
# Silent fail here seems the best route since some modules
# may not be available in all environments.
return
self.failUnless(hasattr(sys.modules[modname], "__all__"),
"%s has no __all__ attribute" % modname)
names = {}
exec "from %s import *" % modname in names
if "__builtins__" in names:
del names["__builtins__"]
keys = set(names)
all = set(sys.modules[modname].__all__)
self.assertEqual(keys, all)
def test_all(self):
if not sys.platform.startswith('java'):
# In case _socket fails to build, make this test fail more gracefully
# than an AttributeError somewhere deep in CGIHTTPServer.
import _socket
self.check_all("BaseHTTPServer")
self.check_all("Bastion")
self.check_all("CGIHTTPServer")
self.check_all("ConfigParser")
self.check_all("Cookie")
self.check_all("MimeWriter")
self.check_all("Queue")
self.check_all("SimpleHTTPServer")
self.check_all("SocketServer")
self.check_all("StringIO")
self.check_all("UserString")
self.check_all("aifc")
self.check_all("atexit")
self.check_all("audiodev")
self.check_all("base64")
self.check_all("bdb")
self.check_all("binhex")
self.check_all("calendar")
self.check_all("cgi")
self.check_all("cmd")
self.check_all("code")
self.check_all("codecs")
self.check_all("codeop")
self.check_all("colorsys")
self.check_all("commands")
self.check_all("compileall")
self.check_all("copy")
self.check_all("copy_reg")
self.check_all("csv")
self.check_all("dbhash")
self.check_all("decimal")
self.check_all("difflib")
self.check_all("dircache")
self.check_all("dis")
self.check_all("doctest")
self.check_all("dummy_thread")
self.check_all("dummy_threading")
self.check_all("filecmp")
self.check_all("fileinput")
self.check_all("fnmatch")
self.check_all("fpformat")
self.check_all("ftplib")
self.check_all("getopt")
self.check_all("getpass")
self.check_all("gettext")
self.check_all("glob")
self.check_all("gzip")
self.check_all("heapq")
self.check_all("htmllib")
self.check_all("httplib")
self.check_all("ihooks")
self.check_all("imaplib")
self.check_all("imghdr")
self.check_all("imputil")
self.check_all("keyword")
self.check_all("linecache")
self.check_all("locale")
self.check_all("macpath")
self.check_all("macurl2path")
self.check_all("mailbox")
self.check_all("mailcap")
self.check_all("mhlib")
self.check_all("mimetools")
self.check_all("mimetypes")
self.check_all("mimify")
self.check_all("multifile")
self.check_all("netrc")
self.check_all("nntplib")
self.check_all("ntpath")
self.check_all("opcode")
self.check_all("optparse")
self.check_all("os")
self.check_all("os2emxpath")
self.check_all("pdb")
self.check_all("pickle")
self.check_all("pickletools")
self.check_all("pipes")
self.check_all("popen2")
self.check_all("poplib")
self.check_all("posixpath")
self.check_all("pprint")
self.check_all("profile")
self.check_all("pstats")
self.check_all("pty")
self.check_all("py_compile")
self.check_all("pyclbr")
self.check_all("quopri")
self.check_all("random")
self.check_all("re")
self.check_all("repr")
self.check_all("rexec")
self.check_all("rfc822")
self.check_all("rlcompleter")
self.check_all("robotparser")
self.check_all("sched")
self.check_all("sets")
self.check_all("sgmllib")
self.check_all("shelve")
self.check_all("shlex")
self.check_all("shutil")
self.check_all("smtpd")
self.check_all("smtplib")
self.check_all("sndhdr")
self.check_all("socket")
self.check_all("_strptime")
self.check_all("symtable")
self.check_all("tabnanny")
self.check_all("tarfile")
self.check_all("telnetlib")
self.check_all("tempfile")
self.check_all("test.test_support")
self.check_all("textwrap")
self.check_all("threading")
self.check_all("timeit")
self.check_all("toaiff")
self.check_all("tokenize")
self.check_all("traceback")
self.check_all("tty")
self.check_all("unittest")
self.check_all("urllib")
self.check_all("urlparse")
self.check_all("uu")
self.check_all("warnings")
self.check_all("wave")
self.check_all("weakref")
self.check_all("webbrowser")
self.check_all("xdrlib")
self.check_all("zipfile")
        # rlcompleter needs special consideration; it imports readline which
# initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-(
try:
self.check_all("rlcompleter")
finally:
try:
import locale
except ImportError:
pass
else:
locale.setlocale(locale.LC_CTYPE, 'C')
def test_main():
run_unittest(AllTest)
if __name__ == "__main__":
test_main()
| apache-2.0 | 1,099,909,066,978,600,300 | 32.251366 | 81 | 0.5553 | false |
Andr3iC/courtlistener | cleaning_scripts/correct_links_to_resource_org_186.py | 5 | 2193 | import os
import sys
execfile('/etc/courtlistener')
sys.path.append(INSTALL_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from alert.search.models import Document, Citation
from alert.lib.db_tools import queryset_generator
from alert.lib.string_utils import clean_string
from alert.lib.string_utils import harmonize
from alert.lib.string_utils import titlecase
from optparse import OptionParser
def link_fixer(link):
"""Fixes the errors in a link
Orig: http://bulk.resource.org/courts.gov/c/US/819/996.F2d.311.html
Fixed: http://bulk.resource.org/courts.gov/c/F2/996/996.F2d.311.html
"""
# Very crude and lazy replacement of US with F2
link_parts = link.split('US')
fixed = 'F2'.join(link_parts)
# Fixes the number
link_parts = fixed.split('/')
number = int(link_parts[-2]) + 177
fixed = '/'.join(link_parts[0:-2]) + "/" + str(number) + "/" + str(link_parts[-1])
return fixed
def cleaner(simulate=False, verbose=False):
docs = queryset_generator(Document.objects.filter(source = 'R', time_retrieved__gt = '2011-06-01'))
for doc in docs:
original_link = doc.download_url
fixed = link_fixer(original_link)
doc.download_url = fixed
if verbose:
print "Changing: " + original_link
print " to: " + fixed
if not simulate:
doc.save()
def main():
usage = "usage: %prog [--verbose] [---simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help="Simulate the corrections without " + \
"actually making them.")
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return cleaner(simulate, verbose)
if __name__ == '__main__':
main()
| agpl-3.0 | 2,335,938,182,902,024,000 | 31.25 | 103 | 0.617875 | false |
Fokko/incubator-airflow | airflow/contrib/hooks/gdrive_hook.py | 1 | 5267 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Drive service"""
from typing import Any, Optional
from googleapiclient.discovery import Resource, build
from googleapiclient.http import MediaFileUpload
from airflow.gcp.hooks.base import CloudBaseHook
# noinspection PyAbstractClass
class GoogleDriveHook(CloudBaseHook):
"""
Hook for the Google Drive APIs.
:param api_version: API version used (for example v3).
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
_conn = None # type: Optional[Resource]
def __init__(
self,
api_version: str = "v3",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
def get_conn(self) -> Any:
"""
Retrieves the connection to Google Drive.
:return: Google Drive services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("drive", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def _ensure_folders_exists(self, path: str) -> str:
service = self.get_conn()
current_parent = "root"
folders = path.split("/")
depth = 0
# First tries to enter directories
for current_folder in folders:
self.log.debug("Looking for %s directory with %s parent", current_folder, current_parent)
conditions = [
"mimeType = 'application/vnd.google-apps.folder'",
"name='{}'".format(current_folder),
"'{}' in parents".format(current_parent),
]
result = (
service.files() # pylint: disable=no-member
.list(q=" and ".join(conditions), spaces="drive", fields="files(id, name)")
.execute(num_retries=self.num_retries)
)
files = result.get("files", [])
if not files:
self.log.info("Not found %s directory", current_folder)
# If the directory does not exist, break loops
break
depth += 1
current_parent = files[0].get("id")
# Check if there are directories to process
if depth != len(folders):
# Create missing directories
for current_folder in folders[depth:]:
file_metadata = {
"name": current_folder,
"mimeType": "application/vnd.google-apps.folder",
"parents": [current_parent],
}
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("Created %s directory", current_folder)
current_parent = file.get("id")
# Return the ID of the last directory
return current_parent
def upload_file(self, local_location: str, remote_location: str) -> str:
"""
Uploads a file that is available locally to a Google Drive service.
:param local_location: The path where the file is available.
:type local_location: str
:param remote_location: The path where the file will be send
:type remote_location: str
:return: File ID
:rtype: str
"""
service = self.get_conn()
directory_path, _, filename = remote_location.rpartition("/")
if directory_path:
parent = self._ensure_folders_exists(directory_path)
else:
parent = "root"
file_metadata = {"name": filename, "parents": [parent]}
media = MediaFileUpload(local_location)
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, media_body=media, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("File %s uploaded to gdrive://%s.", local_location, remote_location)
return file.get("id")
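# Illustrative usage (connection id and paths are placeholders):
#
#   hook = GoogleDriveHook(api_version="v3", gcp_conn_id="google_cloud_default")
#   file_id = hook.upload_file("/tmp/report.csv", "reports/2019/report.csv")
#
# Any missing folders in the remote path are created by _ensure_folders_exists()
# before the upload.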
| apache-2.0 | -3,808,183,682,733,788,000 | 37.445255 | 102 | 0.601291 | false |
zzjkf2009/Midterm_Astar | opencv/samples/python/squares.py | 1 | 1774 | #!/usr/bin/env python
'''
Simple "Square Detector" program.
Loads several images sequentially and tries to find squares in each image.
'''
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
def find_squares(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
if __name__ == '__main__':
from glob import glob
for fn in glob('../data/pic*.png'):
img = cv2.imread(fn)
squares = find_squares(img)
cv2.drawContours( img, squares, -1, (0, 255, 0), 3 )
cv2.imshow('squares', img)
ch = cv2.waitKey()
if ch == 27:
break
cv2.destroyAllWindows()
| mit | 6,742,635,926,271,350,000 | 31.254545 | 110 | 0.543405 | false |
rbian/virt-test | tools/package_jeos.py | 15 | 1491 | #!/usr/bin/python
import os
import sys
import logging
import shutil
import common
from autotest.client import utils
from autotest.client.shared import logging_manager
from virttest import utils_misc
def package_jeos(img):
"""
Package JeOS and make it ready for upload.
Steps:
1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
3) Compress the sparsified image with 7za
:param img: Path to a qcow2 image
"""
basedir = os.path.dirname(img)
backup = img + '.backup'
qemu_img = utils_misc.find_command('qemu-img')
shutil.move(img, backup)
logging.info("Backup %s saved", backup)
utils.system("%s convert -f qcow2 -O qcow2 %s %s" % (qemu_img, backup, img))
logging.info("Sparse file %s created successfully", img)
archiver = utils_misc.find_command('7za')
compressed_img = img + ".7z"
utils.system("%s a %s %s" % (archiver, compressed_img, img))
logging.info("JeOS compressed file %s created successfuly",
compressed_img)
if __name__ == "__main__":
logging_manager.configure_logging(utils_misc.VirtLoggingConfig(),
verbose=True)
if len(sys.argv) <= 1:
logging.info("Usage: %s [path to freshly installed JeOS qcow2 image]",
sys.argv[0])
sys.exit(1)
path = sys.argv[1]
image = os.path.abspath(path)
package_jeos(image)
| gpl-2.0 | 8,227,865,220,899,311,000 | 27.673077 | 80 | 0.637156 | false |
husni75/p2pool_idc | p2pool/bitcoin/sha256.py | 285 | 3084 | from __future__ import division
import struct
k = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
def process(state, chunk):
def rightrotate(x, n):
return (x >> n) | (x << 32 - n) % 2**32
w = list(struct.unpack('>16I', chunk))
for i in xrange(16, 64):
s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ (w[i-15] >> 3)
s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ (w[i-2] >> 10)
w.append((w[i-16] + s0 + w[i-7] + s1) % 2**32)
a, b, c, d, e, f, g, h = start_state = struct.unpack('>8I', state)
for k_i, w_i in zip(k, w):
t1 = (h + (rightrotate(e, 6) ^ rightrotate(e, 11) ^ rightrotate(e, 25)) + ((e & f) ^ (~e & g)) + k_i + w_i) % 2**32
a, b, c, d, e, f, g, h = (
(t1 + (rightrotate(a, 2) ^ rightrotate(a, 13) ^ rightrotate(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))) % 2**32,
a, b, c, (d + t1) % 2**32, e, f, g,
)
return struct.pack('>8I', *((x + y) % 2**32 for x, y in zip(start_state, [a, b, c, d, e, f, g, h])))
initial_state = struct.pack('>8I', 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
class sha256(object):
digest_size = 256//8
block_size = 512//8
def __init__(self, data='', _=(initial_state, '', 0)):
self.state, self.buf, self.length = _
self.update(data)
def update(self, data):
state = self.state
buf = self.buf + data
chunks = [buf[i:i + self.block_size] for i in xrange(0, len(buf) + 1, self.block_size)]
for chunk in chunks[:-1]:
state = process(state, chunk)
self.state = state
self.buf = chunks[-1]
self.length += 8*len(data)
def copy(self, data=''):
return self.__class__(data, (self.state, self.buf, self.length))
def digest(self):
state = self.state
buf = self.buf + '\x80' + '\x00'*((self.block_size - 9 - len(self.buf)) % self.block_size) + struct.pack('>Q', self.length)
for chunk in [buf[i:i + self.block_size] for i in xrange(0, len(buf), self.block_size)]:
state = process(state, chunk)
return state
def hexdigest(self):
return self.digest().encode('hex')
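# Illustrative self-test (assumes Python 2, matching the xrange/str usage above):
# compare this pure-Python implementation against hashlib on a few inputs.
if __name__ == '__main__':
    import hashlib
    for test_data in ['', 'abc', 'x' * 200]:
        assert sha256(test_data).hexdigest() == hashlib.sha256(test_data).hexdigest()
    # incremental updates across block boundaries should match too
    h = sha256('a' * 60)
    h.update('b' * 70)
    assert h.hexdigest() == hashlib.sha256('a' * 60 + 'b' * 70).hexdigest()
    print 'sha256 self-test passed'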
| gpl-3.0 | -4,307,064,108,313,667,000 | 40.12 | 131 | 0.582361 | false |
aisipos/django | django/db/backends/base/operations.py | 43 | 23170 | import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
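    # A concrete backend overrides this to return a SQL fragment; for instance, a
    # PostgreSQL-style backend might implement it roughly as (sketch only, not the
    # exact code of any shipped backend):
    #
    #   def date_extract_sql(self, lookup_type, field_name):
    #       return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)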
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
# RemovedInDjango20Warning
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
def to_unicode(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
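    # Editor's note (illustration, not part of Django): with
    # sql = 'SELECT * FROM t WHERE id = %s' and params = [42], this default
    # implementation returns something like
    #     "QUERY = 'SELECT * FROM t WHERE id = %s' - PARAMS = (42,)"
    # i.e. placeholders are reported next to the params rather than
    # interpolated; backends with access to real quoting override this.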
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT infinity".
        Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
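    # Editor's note (hedged example, not part of Django): with sqlparse
    # installed, prepare_sql_script("CREATE TABLE a (id int);\nINSERT INTO a "
    # "VALUES (1);") returns roughly
    #     ['CREATE TABLE a (id int);', 'INSERT INTO a VALUES (1);']
    # -- one cleaned statement per cursor.execute() call.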
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
        return force_text(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
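    # Editor's note (illustration, not part of Django): for value=2015 this
    # default implementation returns ['2015-01-01', '2015-12-31'], because
    # adapt_datefield_value() simply stringifies the two date objects.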
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
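    # Editor's note (illustration, not part of Django):
    # combine_expression('+', ['"price"', '"tax"']) returns '"price" + "tax"';
    # backends that spell an operator differently override this hook.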
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
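# --- Editor's note: hedged sketch, not part of Django. A hypothetical minimal
# backend would subclass BaseDatabaseOperations and implement the hooks that
# raise NotImplementedError above; the SQL below is only plausible example
# syntax, not the dialect of any particular database.
class _SketchDatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        # Avoid double-quoting names that are already quoted.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name
    def date_extract_sql(self, lookup_type, field_name):
        # e.g. EXTRACT('year' FROM "pub_date")
        return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    def no_limit_value(self):
        # This hypothetical backend can simply omit the LIMIT clause.
        return None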
| bsd-3-clause | 7,800,601,149,006,423,000 | 37.171334 | 117 | 0.635865 | false |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_strftime.py | 132 | 6956 | """
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import test_support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\%', '%')
new_text = new_text.replace('\:', ':')
new_text = new_text.replace('\?', '?')
return new_text
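# Editor's note (worked example, not part of the test): escapestr escapes the
# literal text for use as a regex while keeping the AM/PM alternation intact,
# so escapestr('07/15/09 (AM|am)', '(AM|am)') yields 07\/15\/09\ (AM|am) --
# slashes and the space escaped, the trailing (AM|am) left as a regex group.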
class StrftimeTest(unittest.TestCase):
def __init__(self, *k, **kw):
unittest.TestCase.__init__(self, *k, **kw)
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if test_support.verbose:
print "Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0])
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if test_support.verbose:
print "strftime test for", time.ctime(now)
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
            # mustn't raise a ValueError
try:
result = time.strftime(e[0], now)
except ValueError, error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(long(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError, result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if test_support.verbose:
print msg
continue
if re.match(escapestr(e[1], self.ampm), result):
if test_support.verbose:
print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
elif not result or result[0] == '%':
if test_support.verbose:
print "Does not appear to support '%s' format (%s)" % \
(e[0], e[2])
else:
if test_support.verbose:
print "Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2])
print " Expected %s, but got %s" % (e[1], result)
def test_main():
test_support.run_unittest(StrftimeTest)
if __name__ == '__main__':
test_main()
| gpl-2.0 | 7,704,706,241,512,089,000 | 36.6 | 83 | 0.475273 | false |
Don42/youtube-dl | youtube_dl/extractor/ina.py | 129 | 1064 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class InaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)'
_TEST = {
'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
'info_dict': {
'id': 'I12055569',
'ext': 'mp4',
            'title': 'François Hollande "Je crois que c\'est clair"',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
info_doc = self._download_xml(mrss_url, video_id)
self.report_extraction(video_id)
video_url = info_doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url']
return {
'id': video_id,
'url': video_url,
'title': info_doc.find('.//title').text,
}
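# Editor's note -- hedged usage sketch, not part of youtube-dl. The extractor
# is picked automatically when a URL matches _VALID_URL, e.g. on the command
# line:
#     youtube-dl "http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html"
# or programmatically, something along these lines:
#     from youtube_dl import YoutubeDL
#     YoutubeDL().extract_info(
#         'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
#         download=False)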
| unlicense | -5,457,803,111,174,172,000 | 28.527778 | 105 | 0.555974 | false |
xpac1985/ansible | lib/ansible/plugins/action/win_copy.py | 185 | 1153 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.copy import ActionModule as CopyActionModule
# Even though CopyActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(CopyActionModule, ActionBase):
pass
| gpl-3.0 | -9,062,848,118,106,574,000 | 38.758621 | 73 | 0.774501 | false |
blindroot/django | django/contrib/gis/db/backends/spatialite/base.py | 445 | 3615 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
Database, DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper,
)
from django.utils import six
from .client import SpatiaLiteClient
from .features import DatabaseFeatures
from .introspection import SpatiaLiteIntrospection
from .operations import SpatiaLiteOperations
from .schema import SpatialiteSchemaEditor
class DatabaseWrapper(SQLiteDatabaseWrapper):
SchemaEditorClass = SpatialiteSchemaEditor
def __init__(self, *args, **kwargs):
# Before we get too far, make sure pysqlite 2.5+ is installed.
if Database.version_info < (2, 5, 0):
raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
'compatible with SpatiaLite and GeoDjango.')
# Trying to find the location of the SpatiaLite library.
# Here we are figuring out the path to the SpatiaLite library
# (`libspatialite`). If it's not in the system library path (e.g., it
# cannot be found by `ctypes.util.find_library`), then it may be set
# manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
find_library('spatialite'))
if not self.spatialite_lib:
raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
'Make sure it is in your library path, or set '
'SPATIALITE_LIBRARY_PATH in your settings.'
)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = SpatiaLiteOperations(self)
self.client = SpatiaLiteClient(self)
self.introspection = SpatiaLiteIntrospection(self)
def get_new_connection(self, conn_params):
conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
# Enabling extension loading on the SQLite connection.
try:
conn.enable_load_extension(True)
except AttributeError:
raise ImproperlyConfigured(
'The pysqlite library does not support C extension loading. '
'Both SQLite and pysqlite must be configured to allow '
'the loading of extensions to use SpatiaLite.')
# Loading the SpatiaLite library extension on the connection, and returning
# the created cursor.
cur = conn.cursor(factory=SQLiteCursorWrapper)
try:
cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
except Exception as msg:
new_msg = (
'Unable to load the SpatiaLite library extension '
'"%s" because: %s') % (self.spatialite_lib, msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
cur.close()
return conn
def prepare_database(self):
super(DatabaseWrapper, self).prepare_database()
# Check if spatial metadata have been initialized in the database
with self.cursor() as cursor:
cursor.execute("PRAGMA table_info(geometry_columns);")
if cursor.fetchall() == []:
arg = "1" if self.features.supports_initspatialmetadata_in_one_transaction else ""
cursor.execute("SELECT InitSpatialMetaData(%s)" % arg)
| bsd-3-clause | -3,020,421,747,256,024,600 | 47.2 | 98 | 0.643707 | false |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/idlelib/AutoComplete.py | 67 | 9061 | """AutoComplete.py - An IDLE extension for automatically completing names.
This extension can complete either attribute names or file names. It can pop
a window with all available names, for the user to select from.
"""
import os
import sys
import string
from idlelib.configHandler import idleConf
# This string includes all chars that may be in a file name (without a path
# separator)
FILENAME_CHARS = string.ascii_letters + string.digits + os.curdir + "._~#$:-"
# This string includes all chars that may be in an identifier
ID_CHARS = string.ascii_letters + string.digits + "_"
# These constants represent the two different types of completions
COMPLETE_ATTRIBUTES, COMPLETE_FILES = range(1, 2+1)
from idlelib import AutoCompleteWindow
from idlelib.HyperParser import HyperParser
import __main__
SEPS = os.sep
if os.altsep: # e.g. '/' on Windows...
SEPS += os.altsep
class AutoComplete:
menudefs = [
('edit', [
("Show Completions", "<<force-open-completions>>"),
])
]
popupwait = idleConf.GetOption("extensions", "AutoComplete",
"popupwait", type="int", default=0)
def __init__(self, editwin=None):
self.editwin = editwin
if editwin is None: # subprocess and test
return
self.text = editwin.text
self.autocompletewindow = None
# id of delayed call, and the index of the text insert when the delayed
# call was issued. If _delayed_completion_id is None, there is no
# delayed call.
self._delayed_completion_id = None
self._delayed_completion_index = None
def _make_autocomplete_window(self):
return AutoCompleteWindow.AutoCompleteWindow(self.text)
def _remove_autocomplete_window(self, event=None):
if self.autocompletewindow:
self.autocompletewindow.hide_window()
self.autocompletewindow = None
def force_open_completions_event(self, event):
"""Happens when the user really wants to open a completion list, even
if a function call is needed.
"""
self.open_completions(True, False, True)
def try_open_completions_event(self, event):
"""Happens when it would be nice to open a completion list, but not
        really necessary, for example after a dot, so function
calls won't be made.
"""
lastchar = self.text.get("insert-1c")
if lastchar == ".":
self._open_completions_later(False, False, False,
COMPLETE_ATTRIBUTES)
elif lastchar in SEPS:
self._open_completions_later(False, False, False,
COMPLETE_FILES)
def autocomplete_event(self, event):
"""Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
"""
if hasattr(event, "mc_state") and event.mc_state:
# A modifier was pressed along with the tab, continue as usual.
return
if self.autocompletewindow and self.autocompletewindow.is_active():
self.autocompletewindow.complete()
return "break"
else:
opened = self.open_completions(False, True, True)
if opened:
return "break"
def _open_completions_later(self, *args):
self._delayed_completion_index = self.text.index("insert")
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = \
self.text.after(self.popupwait, self._delayed_open_completions,
*args)
def _delayed_open_completions(self, *args):
self._delayed_completion_id = None
if self.text.index("insert") != self._delayed_completion_index:
return
self.open_completions(*args)
def open_completions(self, evalfuncs, complete, userWantsWin, mode=None):
"""Find the completions and create the AutoCompleteWindow.
Return True if successful (no syntax error or so found).
if complete is True, then if there's nothing to complete and no
start of completion, won't open completions and return False.
If mode is given, will open a completion list only in this mode.
"""
# Cancel another delayed call, if it exists.
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = None
hp = HyperParser(self.editwin, "insert")
curline = self.text.get("insert linestart", "insert")
i = j = len(curline)
if hp.is_in_string() and (not mode or mode==COMPLETE_FILES):
self._remove_autocomplete_window()
mode = COMPLETE_FILES
while i and curline[i-1] in FILENAME_CHARS:
i -= 1
comp_start = curline[i:j]
j = i
while i and curline[i-1] in FILENAME_CHARS + SEPS:
i -= 1
comp_what = curline[i:j]
elif hp.is_in_code() and (not mode or mode==COMPLETE_ATTRIBUTES):
self._remove_autocomplete_window()
mode = COMPLETE_ATTRIBUTES
while i and curline[i-1] in ID_CHARS:
i -= 1
comp_start = curline[i:j]
if i and curline[i-1] == '.':
hp.set_index("insert-%dc" % (len(curline)-(i-1)))
comp_what = hp.get_expression()
if not comp_what or \
(not evalfuncs and comp_what.find('(') != -1):
return
else:
comp_what = ""
else:
return
if complete and not comp_what and not comp_start:
return
comp_lists = self.fetch_completions(comp_what, mode)
if not comp_lists[0]:
return
self.autocompletewindow = self._make_autocomplete_window()
self.autocompletewindow.show_window(comp_lists,
"insert-%dc" % len(comp_start),
complete,
mode,
userWantsWin)
return True
def fetch_completions(self, what, mode):
"""Return a pair of lists of completions for something. The first list
is a sublist of the second. Both are sorted.
If there is a Python subprocess, get the comp. list there. Otherwise,
either fetch_completions() is running in the subprocess itself or it
was called in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
        two unrelated modules are being edited, some calltips in the current
module may be inoperative if the module was not the last to run.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_completion_list",
(what, mode), {})
else:
if mode == COMPLETE_ATTRIBUTES:
if what == "":
namespace = __main__.__dict__.copy()
namespace.update(__main__.__builtins__.__dict__)
bigl = eval("dir()", namespace)
bigl.sort()
if "__all__" in bigl:
smalll = eval("__all__", namespace)
smalll.sort()
else:
smalll = [s for s in bigl if s[:1] != '_']
else:
try:
entity = self.get_entity(what)
bigl = dir(entity)
bigl.sort()
if "__all__" in bigl:
smalll = entity.__all__
smalll.sort()
else:
smalll = [s for s in bigl if s[:1] != '_']
except:
return [], []
elif mode == COMPLETE_FILES:
if what == "":
what = "."
try:
expandedpath = os.path.expanduser(what)
bigl = os.listdir(expandedpath)
bigl.sort()
smalll = [s for s in bigl if s[:1] != '.']
except OSError:
return [], []
if not smalll:
smalll = bigl
return smalll, bigl
def get_entity(self, name):
"""Lookup name in a namespace spanning sys.modules and __main.dict__"""
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
return eval(name, namespace)
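# Editor's note -- illustrative sketch, not part of IDLE. With editwin=None the
# constructor returns early, so the completion fetching can be exercised on its
# own; the exact lists depend on the running interpreter:
#     ac = AutoComplete()
#     smalll, bigl = ac.fetch_completions('os.path', COMPLETE_ATTRIBUTES)
#     # bigl is the full dir() listing, smalll omits the underscored names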
| apache-2.0 | -1,874,823,991,964,885,500 | 38.567686 | 79 | 0.54707 | false |
domesticduck/MenuConciergeServer | vendor/bundle/ruby/2.0.0/gems/libv8-3.16.14.3/vendor/v8/tools/testrunner/local/verbose.py | 19 | 3680 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
from . import statusfile
REPORT_TEMPLATE = (
"""Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix""")
def PrintReport(tests):
total = len(tests)
skipped = timeout = nocrash = passes = fail_ok = fail = 0
for t in tests:
if "outcomes" not in dir(t) or not t.outcomes:
passes += 1
continue
o = t.outcomes
if statusfile.DoSkip(o):
skipped += 1
continue
if statusfile.TIMEOUT in o: timeout += 1
if statusfile.IsFlaky(o): nocrash += 1
if list(o) == [statusfile.PASS]: passes += 1
if statusfile.IsFailOk(o): fail_ok += 1
if list(o) == [statusfile.FAIL]: fail += 1
print REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
"timeout": timeout,
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
"fail": fail
}
def PrintTestSource(tests):
for test in tests:
suite = test.suite
source = suite.GetSourceForTest(test).strip()
if len(source) > 0:
print "--- begin source: %s/%s ---" % (suite.name, test.path)
print source
print "--- end source: %s/%s ---" % (suite.name, test.path)
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
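# Editor's note (worked example, not part of the original): FormatTime(61.5)
# returns "01:01.500" -- time.gmtime(61.5) supplies the minutes and seconds,
# and the rounded milliseconds are appended with zero padding.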
def PrintTestDurations(suites, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
timed_tests = [ t for s in suites for t in s.tests
if t.duration is not None ]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
| apache-2.0 | 7,575,706,523,352,208,000 | 36.171717 | 75 | 0.683152 | false |
theo-l/django | tests/postgres_tests/test_hstore.py | 11 | 14088 | import json
from django.core import checks, exceptions, serializers
from django.db import connection
from django.db.models import OuterRef, Subquery
from django.db.models.expressions import RawSQL
from django.forms import Form
from django.test.utils import CaptureQueriesContext, isolate_apps
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
from .models import HStoreModel, PostgreSQLModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.fields.hstore import KeyTransform
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgreSQLTestCase):
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertIsNone(reloaded.field)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_key_val_cast_to_string(self):
        value = {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'}
        expected_value = {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'}
instance = HStoreModel.objects.create(field=value)
instance = HStoreModel.objects.get()
self.assertEqual(instance.field, expected_value)
instance = HStoreModel.objects.get(field__a=1)
self.assertEqual(instance.field, expected_value)
        instance = HStoreModel.objects.get(field__has_keys=[2, 'a', 'ï'])
self.assertEqual(instance.field, expected_value)
def test_array_field(self):
value = [
            {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
            {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
]
expected_value = [
            {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
            {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
]
instance = HStoreModel.objects.create(array_field=value)
instance.refresh_from_db()
self.assertEqual(instance.array_field, expected_value)
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = HStoreModel.objects.bulk_create([
HStoreModel(field={'a': 'b'}),
HStoreModel(field={'a': 'b', 'c': 'd'}),
HStoreModel(field={'c': 'd'}),
HStoreModel(field={}),
HStoreModel(field=None),
])
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_in_generator(self):
def search():
yield {'a': 'b'}
self.assertSequenceEqual(
HStoreModel.objects.filter(field__in=search()),
self.objs[:1]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
self.objs[:3]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_key_transform_raw_expression(self):
expr = RawSQL('%s::hstore', ['x => b, y => c'])
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a=KeyTransform('x', expr)),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_order_by_field(self):
more_objs = (
HStoreModel.objects.create(field={'g': '637'}),
HStoreModel.objects.create(field={'g': '002'}),
HStoreModel.objects.create(field={'g': '042'}),
HStoreModel.objects.create(field={'g': '981'}),
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='g').order_by('field__g'),
[more_objs[1], more_objs[2], more_objs[0], more_objs[3]]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
def test_key_isnull(self):
obj = HStoreModel.objects.create(field={'a': None})
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=True),
self.objs[2:5] + [obj]
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=False),
self.objs[:2]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
self.objs[:2]
)
def test_key_sql_injection(self):
with CaptureQueriesContext(connection) as queries:
self.assertFalse(
HStoreModel.objects.filter(**{
"field__test' = 'a') OR 1 = 1 OR ('d": 'x',
}).exists()
)
self.assertIn(
"""."field" -> 'test'' = ''a'') OR 1 = 1 OR (''d') = 'x' """,
queries[0]['sql'],
)
def test_obj_subquery_lookup(self):
qs = HStoreModel.objects.annotate(
value=Subquery(HStoreModel.objects.filter(pk=OuterRef('pk')).values('field')),
).filter(value__a='b')
self.assertSequenceEqual(qs, self.objs[:2])
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLSimpleTestCase):
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default={})
model = MyModel()
self.assertEqual(model.check(), [
checks.Warning(
msg=(
"HStoreField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
obj=MyModel._meta.get_field('field'),
id='fields.E010',
)
])
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default=dict)
self.assertEqual(MyModel().check(), [])
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = json.dumps([{
'model': 'postgres_tests.hstoremodel',
'pk': None,
'fields': {
'field': json.dumps({'a': 'b'}),
'array_field': json.dumps([
json.dumps({'a': 'b'}),
json.dumps({'b': 'a'}),
]),
},
}])
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'}, array_field=[{'a': 'b'}, {'b': 'a'}])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
self.assertEqual(instance.array_field, [{'a': 'b'}, {'b': 'a'}])
def test_roundtrip_with_null(self):
instance = HStoreModel(field={'a': 'b', 'c': None})
data = serializers.serialize('json', [instance])
new_instance = list(serializers.deserialize('json', data))[0].object
self.assertEqual(instance.field, new_instance.field)
class TestValidation(PostgreSQLSimpleTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
        self.assertEqual(cm.exception.message % cm.exception.params, 'The value of “a” is not a string or null.')
def test_none_allowed_as_value(self):
field = HStoreField()
self.assertEqual(field.clean({'a': None}, None), {'a': None})
class TestFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_non_dict_json(self):
field = forms.HStoreField()
msg = 'Input must be a JSON dictionary.'
with self.assertRaisesMessage(exceptions.ValidationError, msg) as cm:
field.clean('["a", "b", 1]')
self.assertEqual(cm.exception.code, 'invalid_format')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_none_value(self):
field = forms.HStoreField()
value = field.clean('{"a": null}')
self.assertEqual(value, {'a': None})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
def test_field_has_changed(self):
class HStoreFormTest(Form):
f1 = forms.HStoreField()
form_w_hstore = HStoreFormTest()
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
self.assertTrue(form_w_hstore.has_changed())
class TestValidator(PostgreSQLSimpleTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
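# Editor's note -- hedged usage sketch, not part of the test suite. The
# HStoreModel exercised above is assumed to be declared roughly like this, and
# the lookups under test correspond to ordinary ORM calls:
#     class HStoreModel(PostgreSQLModel):
#         field = HStoreField(null=True)
#         array_field = ArrayField(HStoreField(), null=True)
#
#     HStoreModel.objects.create(field={'a': 'b'})
#     HStoreModel.objects.filter(field__a='b')        # key transform
#     HStoreModel.objects.filter(field__has_key='a')  # key containment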
| bsd-3-clause | 5,551,133,462,877,554,000 | 34.265664 | 113 | 0.576363 | false |
eranchetz/nupic | examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py | 96 | 16581 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
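# Illustrative note (added; not part of the original experiment description):
# if predictAheadTime were 1 hour and the aggregation period 15 minutes,
# aggregationDivide would return 4.0 and the clParams 'steps' value would
# become '4'. The concrete numbers here are hypothetical.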
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 | -2,198,227,235,380,133,000 | 37.922535 | 92 | 0.620891 | false |
bjolivot/ansible | lib/ansible/modules/network/lenovo/cnos_template.py | 19 | 7146 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI templates to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_template
author: "Dave Kasberg (@dkasberg)"
short_description: Manage switch configuration using templates on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way
to execute a set of CNOS commands on a switch by evaluating the current running configuration
and executing the commands only if the specific settings have not been already configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
        Note: The command file must contain the Ansible keyword {{ inventory_hostname }} in its
filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_template. These are written in the main.yml file of the tasks directory.
---
- name: Replace Config CLI command template with values
template:
src: demo_template.j2
dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
vlanid1: 13
slot_chassis_number1: "1/2"
portchannel_interface_number1: 100
portchannel_mode1: "active"
- name: Applying CLI commands on Switches
cnos_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Template Applied.]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
commandfile=dict(required=True),
outputfile=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
commandfile = module.params['commandfile']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send commands one by one
#with open(commandfile, "r") as f:
f = open(commandfile, "r")
for line in f:
# Omit the comment lines in template file
if not line.startswith("#"):
command = line
if not line.endswith("\n"):
command = command+"\n"
response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
errorMsg = cnos.checkOutputForError(response)
output = output + response
if(errorMsg is not None):
                break # To cater to multi case
# Write to memory
output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
# Write output to file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Template Applied")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,577,342,859,167,690,000 | 38.480663 | 140 | 0.688497 | false |
Amechi101/indieapp | account/auth_backends.py | 5 | 1360 | from __future__ import unicode_literals
from django.db.models import Q
from django.contrib.auth.backends import ModelBackend
from account.compat import get_user_model, get_user_lookup_kwargs
from account.models import EmailAddress
class UsernameAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": credentials["username"]
})
try:
user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, KeyError):
return None
else:
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
class EmailAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
qs = EmailAddress.objects.filter(Q(primary=True) | Q(verified=True))
try:
email_address = qs.get(email__iexact=credentials["username"])
except (EmailAddress.DoesNotExist, KeyError):
return None
else:
user = email_address.user
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
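# Illustrative configuration sketch (added for clarity; not part of this module).
# These backends are typically enabled through Django settings; the exact dotted
# paths below are assumptions that depend on how the app is installed:
#
#   AUTHENTICATION_BACKENDS = [
#       "account.auth_backends.EmailAuthenticationBackend",
#       "django.contrib.auth.backends.ModelBackend",
#   ]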
| mit | 5,096,587,762,099,469,000 | 30.627907 | 76 | 0.604412 | false |
nelsonw2014/watershed | setup.py | 1 | 1598 | import setuptools
from os import path
if __name__ == "__main__":
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name="watershed",
version="0.0.1",
description="Data streams drain into a data reservoir so they can be used later or combined together",
author="CommerceHub Open Source",
url="https://github.com/commercehub-oss/watershed",
long_description=long_description,
packages=[
"watershed",
"pump_client"
],
install_requires=[
"Boto3",
"sshtunnel",
"requests"
],
        tests_require=[
'nose',
'requests'
],
include_package_data=True,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Ruby',
'Programming Language :: Unix Shell',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving',
'Topic :: System :: Clustering',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities'
],
zip_safe=False,
) | apache-2.0 | -7,060,895,278,487,383,000 | 33.76087 | 110 | 0.533792 | false |
dennissergeev/faamtools | faamtools/avaps.py | 2 | 2620 | # -*- coding: utf-8 -*-
"""
Functions to process AVAPS dropsondes data
"""
import datetime
import netCDF4 as nc
import numpy as np
from . import DsFld, ObsData
from . import utils
def read_avaps_nc(fname, flds=None, time2datetime=False):
"""Read AVAPS dropsonde data from a NetCDF file.
Open a NetCDF file and write data into `ObsData` instance of `DsFld` objects.
Perform filtering of raw dropsonde data using `var_utils.filt_miss_row`
Args:
-----
fname: str, file name
Kwargs:
-------
flds: dict, names of variables to read from a dropsonde data file
The default value is
dict(time='time',hgt='alt',lon='lon',lat='lat',
u='u_wind',v='v_wind',wspd='wspd',wdir='wdir',
pres='pres',tdry='tdry',thta='theta',dhgt='dz',
tdew='dp',relh='rh',mixr='mr',thte='theta_e',thtv='theta_v')
time2datetime: boolean, optional.
If True and `flds` dictionary contains 'time' key, convert array of
time values to `datetime.datetime` objects.
Requires `var_utils.timestr2datetime()` to parse time units.
Defaults to False.
Returns:
--------
data: `ObsData` instance
"""
    if flds is None:
flds = dict(time='time',hgt='alt',lon='lon',lat='lat',\
u='u_wind',v='v_wind',wspd='wspd',wdir='wdir',\
pres='pres',tdry='tdry',thta='theta',dhgt='dz',\
tdew='dp',relh='rh',mixr='mr',thte='theta_e',thtv='theta_v')
with nc.Dataset(fname) as f:
dum = ObsData()
for i in flds:
ncfld = f.variables[flds[i]]
dum(**{i:DsFld(raw=ncfld[:],units=ncfld.units,long_name=ncfld.long_name)})
flds_list = [ii for ii in flds] # to keep the order
fil_list = utils.filt_miss_row(*[getattr(dum,ii).raw for ii in flds_list])
data = ObsData()
for i, j in enumerate(fil_list):
data(**{flds_list[i]:DsFld(raw=getattr(dum,flds_list[i]).raw,\
fil=j,\
units=getattr(dum,flds_list[i]).units,\
long_name=getattr(dum,flds_list[i]).long_name)})
if time2datetime and 'time' in flds:
if hasattr(data.time, 'units'):
tbase, tstep_sec = utils.timestr2datetime(data.time.units)
arr_sec2datetime = np.vectorize(lambda x: tbase + datetime.timedelta(seconds=x*tstep_sec))
data.time.fil = arr_sec2datetime(data.time.fil)
return data
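# Illustrative usage sketch (added; the file name and the inspected fields are
# assumptions, not taken from this module):
#
#   ds = read_avaps_nc('dropsonde_profile.nc', time2datetime=True)
#   print(ds.tdry.units)      # units string stored on each DsFld
#   print(ds.hgt.fil.max())   # highest altitude after missing-row filtering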
| mit | -4,138,353,992,132,944,000 | 36.971014 | 102 | 0.55687 | false |
CSGreater-Developers/HMC-Grader | app/userViews/instructor/testSettings.py | 2 | 4117 | # coding=utf-8
'''This module handles all functions responsible for modifying the test file
settings page.
View Function: instructorEditTestFile (instructor/testSettings.html)
Redirect Functions: TODO
AJAX Functions: TODO
'''
#Import the app
from app import app
#Import needed flask functions
from flask import g, render_template, redirect, url_for, flash, jsonify, abort
from flask import request
from flask.ext.login import current_user, login_required
#Import the models we need on these pages
from app.structures.models.user import *
from app.structures.models.gradebook import *
from app.structures.models.course import *
from app.structures.forms import ReuploadTestForm
#import plugins
from app.plugins.autograder import getTestFileParsers
#Generic python imports
import json
from werkzeug import secure_filename
@app.route('/editproblem/<pid>/editTestFile/<filename>')
@login_required
def instructorEditTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
#For security purposes we send anyone who isnt an instructor or
#admin away
    if c not in current_user.courseInstructor:
abort(403)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename)
return render_template('instructor/testSettings.html', course=c, assignment=a,\
problem=p, filename=filename, \
data=getTestData(filepath), form=ReuploadTestForm())
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
@app.route('/editproblem/<pid>/saveTestFile/<filename>', methods=['POST'])
@login_required
def instructorSaveTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
#For security purposes we send anyone who isnt an instructor or
#admin away
if not (g.user.isAdmin or c in current_user.courseInstructor):
abort(403)
#Try to get the contents
content = request.get_json()
#make sure we got the contents
if content == None:
return jsonify(res=False)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename+".json")
with open(filepath, 'w') as f:
json.dump(content, f, sort_keys=True,indent=4, separators=(',', ': '))
return jsonify(res=True)
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
@app.route('/editproblem/<pid>/reupTestFile/<filename>', methods=['POST'])
@login_required
def instructorReuploadTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
#For security purposes we send anyone who isnt an instructor or
#admin away
    if c not in current_user.courseInstructor:
abort(403)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename)
gradeSpec = getTestData(filepath)
parser = getTestFileParsers()[gradeSpec['type']]
if request.method == "POST":
form = ReuploadTestForm(request.form)
if form.validate():
filename = secure_filename(request.files[form.testFile.name].filename)
if filename != gradeSpec['file']:
flash("Uploaded file does not have the same name as the existing file. Reupload failed.", "warning")
return redirect(url_for('instructorEditTestFile', pid=pid, filename=gradeSpec['file']))
request.files[form.testFile.name].save(filepath)
tests = parser(filepath)
#Filter out removed tests
for sec in gradeSpec['sections']:
sec['tests'] = [x for x in sec['tests'] if x in tests]
gradeSpec['tests'] = tests
with open(filepath+".json", 'w') as f:
json.dump(gradeSpec, f)
flash("File successfully reuploaded", "success")
return redirect(url_for('instructorEditTestFile', pid=pid, filename=filename))
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
#
# Helper function for test data
#
def getTestData(fn):
with open(fn+".json") as f:
data = json.load(f)
return data
| mit | -1,211,996,728,680,589,300 | 29.496296 | 110 | 0.69711 | false |
hugovk/pylast | tests/test_pylast.py | 1 | 4174 | #!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import os
import time
import unittest
import pytest
from flaky import flaky
import pylast
def load_secrets():
secrets_file = "test_pylast.yaml"
if os.path.isfile(secrets_file):
import yaml # pip install pyyaml
with open(secrets_file, "r") as f: # see example_test_pylast.yaml
doc = yaml.load(f)
else:
doc = {}
try:
doc["username"] = os.environ["PYLAST_USERNAME"].strip()
doc["password_hash"] = os.environ["PYLAST_PASSWORD_HASH"].strip()
doc["api_key"] = os.environ["PYLAST_API_KEY"].strip()
doc["api_secret"] = os.environ["PYLAST_API_SECRET"].strip()
except KeyError:
pytest.skip("Missing environment variables: PYLAST_USERNAME etc.")
return doc
class PyLastTestCase(unittest.TestCase):
def assert_startswith(self, str, prefix, start=None, end=None):
self.assertTrue(str.startswith(prefix, start, end))
def assert_endswith(self, str, suffix, start=None, end=None):
self.assertTrue(str.endswith(suffix, start, end))
@flaky(max_runs=3, min_passes=1)
class TestPyLastWithLastFm(PyLastTestCase):
secrets = None
def unix_timestamp(self):
return int(time.time())
def setUp(self):
if self.__class__.secrets is None:
self.__class__.secrets = load_secrets()
self.username = self.__class__.secrets["username"]
password_hash = self.__class__.secrets["password_hash"]
api_key = self.__class__.secrets["api_key"]
api_secret = self.__class__.secrets["api_secret"]
self.network = pylast.LastFMNetwork(
api_key=api_key,
api_secret=api_secret,
username=self.username,
password_hash=password_hash,
)
def helper_is_thing_hashable(self, thing):
# Arrange
things = set()
# Act
things.add(thing)
# Assert
self.assertIsNotNone(thing)
self.assertEqual(len(things), 1)
def helper_validate_results(self, a, b, c):
# Assert
self.assertIsNotNone(a)
self.assertIsNotNone(b)
self.assertIsNotNone(c)
self.assertGreaterEqual(len(a), 0)
self.assertGreaterEqual(len(b), 0)
self.assertGreaterEqual(len(c), 0)
self.assertEqual(a, b)
self.assertEqual(b, c)
def helper_validate_cacheable(self, thing, function_name):
# Arrange
# get thing.function_name()
func = getattr(thing, function_name, None)
# Act
result1 = func(limit=1, cacheable=False)
result2 = func(limit=1, cacheable=True)
result3 = func(limit=1)
# Assert
self.helper_validate_results(result1, result2, result3)
def helper_at_least_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertGreater(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], expected_type)
def helper_two_different_things_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 2)
thing1 = things[0]
thing2 = things[1]
self.assertIsInstance(thing1, pylast.TopItem)
self.assertIsInstance(thing2, pylast.TopItem)
self.assertIsInstance(thing1.item, expected_type)
self.assertIsInstance(thing2.item, expected_type)
self.assertNotEqual(thing1, thing2)
if __name__ == "__main__":
unittest.main(failfast=True)
| apache-2.0 | 2,102,698,527,309,571,800 | 29.918519 | 78 | 0.624341 | false |
weiting-chen/manila | manila/tests/fake_volume.py | 4 | 1978 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class FakeVolume(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_vol_id')
self.status = kwargs.pop('status', 'available')
self.device = kwargs.pop('device', '')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
class FakeVolumeSnapshot(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_volsnap_id')
self.status = kwargs.pop('status', 'available')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
class API(object):
"""Fake Volume API."""
def get(self, *args, **kwargs):
pass
def create_snapshot_force(self, *args, **kwargs):
pass
def get_snapshot(self, *args, **kwargs):
pass
def delete_snapshot(self, *args, **kwargs):
pass
def create(self, *args, **kwargs):
pass
def extend(self, *args, **kwargs):
pass
def get_all(self, search_opts):
pass
def delete(self, volume_id):
pass
def get_all_snapshots(self, search_opts):
pass
| apache-2.0 | -8,207,143,754,066,170,000 | 25.72973 | 78 | 0.624368 | false |
napkindrawing/ansible | lib/ansible/utils/module_docs_fragments/vyos.py | 224 | 2754 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
"""
| gpl-3.0 | -3,828,806,618,456,020,000 | 40.104478 | 87 | 0.676107 | false |
waxmanr/moose | framework/contrib/nsiqcppstyle/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py | 43 | 3015 | """
Indent the each enum item in the enum block.
== Violation ==
enum A {
A_A, <== Violation
A_B <== Violation
}
== Good ==
enum A {
A_A, <== Good
A_B
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext) :
    if not decl and typeName == "ENUM" and typeContext is not None:
column = GetIndentation(lexer.GetCurToken())
lexer._MoveToToken(typeContext.startToken)
t2 = typeContext.endToken
        while True:
            t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
            if t is None or t == t2:
break
# if typeContext != t.contextStack.Peek() : continue
if GetRealColumn(t) <= (column + 1):
nsiqcppstyle_reporter.Error(t, __name__, "Enum block should be indented. But the token(%s) seems to be unindented" % t.value);
ruleManager.AddTypeNameRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddTypeNameRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
enum A {
}
""")
assert not CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA, BB
}
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4,
BB
}
""")
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
,BB
}
""")
assert CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
/** HELLO */
,BB
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/thisFile.c",
"""
typedef enum {
AA = 4
/** HELLO */
,BB
} DD
""")
assert not CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/thisFile.c",
"""
typedef enum
{
SERVICE,
SERVER,
BROKER,
MANAGER,
REPL_SERVER,
REPL_AGENT,
UTIL_HELP,
UTIL_VERSION,
ADMIN
} UTIL_SERVICE_INDEX_E;
""")
assert not CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/thisFile.c",
"""
enum COLOR
{
COLOR_TRANSPARENT = RGB(0, 0, 255),
COLOR_ROOM_IN_OUT = 0xffff00,
COLOR_CHAT_ITEM = 0xff9419,
COLOR_CHAT_MY = 0x00b4ff,
COLOR_CHAT_YOUR = 0xa3d5ff,
COLOR_ROOM_INFO = 0x00ffff,
COLOR_RESULT_SCORE = 0xffcc00,
COLOR_RESULT_RATING = 0x00fcff,
COLOR_RESULT_POINT = 0x33ff00
}; """)
assert not CheckErrorContent(__name__)
| lgpl-2.1 | -7,119,062,214,850,458,000 | 21.5 | 142 | 0.553234 | false |
petrus-v/odoo | addons/l10n_ro/__openerp__.py | 186 | 2241 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Fekete Mihai <[email protected]>, Tatár Attila <[email protected]>
# Copyright (C) 2011-2014 TOTAL PC SYSTEMS (http://www.erpsystems.ro).
# Copyright (C) 2014 Fekete Mihai
# Copyright (C) 2014 Tatár Attila
# Based on precedent versions developed by Fil System, Fekete Mihai
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "ERPsystems Solutions",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the Accounting Chart, VAT structure, Fiscal Position and Tax Mapping.
It also adds the Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo" : [],
"data" : ['partner_view.xml',
'account_chart.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'fiscal_position_template.xml',
'l10n_chart_ro_wizard.xml',
'res.country.state.csv',
'res.bank.csv',
],
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,886,629,573,844,177,000 | 42.057692 | 112 | 0.57347 | false |
Eureka22/ASM_xf | PythonD/lib/python2.4/site-packages/display/PIL/Image.py | 2 | 54748 | #
# The Python Imaging Library.
# $Id: //modules/pil/PIL/Image.py#47 $
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1996-05-27 fl PIL release 0.1b2
# 1996-11-04 fl PIL release 0.2b1
# 1996-12-08 fl PIL release 0.2b2
# 1996-12-16 fl PIL release 0.2b3
# 1997-01-14 fl PIL release 0.2b4
# 1998-07-02 fl PIL release 0.3b1
# 1998-07-17 fl PIL release 0.3b2
# 1999-01-01 fl PIL release 1.0b1
# 1999-02-08 fl PIL release 1.0b2
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-01-14 fl PIL release 1.2b1 (imToolkit)
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
#
# Copyright (c) 1997-2003 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
VERSION = "1.1.4"
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk
import FixTk
except ImportError:
pass
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
import _imaging
core = _imaging
del _imaging
except ImportError, v:
import string
core = _imaging_not_installed()
if str(v)[:20] == "Module use of python":
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
try:
import warnings
warnings.warn(
"The _imaging extension was built for another version "
"of Python; most PIL functions will be disabled",
RuntimeWarning
)
except (ImportError, NameError, AttributeError):
pass # sorry
import ImagePalette
import os, string, sys
# type stuff
from types import IntType, StringType, TupleType
try:
UnicodeStringType = type(unicode(""))
def isStringType(t):
return isinstance(t, StringType) or isinstance(t, UnicodeStringType)
except NameError:
def isStringType(t):
return isinstance(t, StringType)
def isTupleType(t):
return isinstance(t, TupleType)
def isImageType(t):
return hasattr(t, "im")
def isDirectory(f):
return isStringType(f) and os.path.isdir(f)
from operator import isNumberType, isSequenceType
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2 # Not yet implemented
QUAD = 3
MESH = 4
# resampling filters
NONE = 0
NEAREST = 0
ANTIALIAS = 1 # 3-lobed lanczos
LINEAR = BILINEAR = 2
CUBIC = BICUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
# Experimental modes include I;16, I;16B, RGBa, BGR;15,
# and BGR;24. Use these modes only if you know exactly
# what you're doing...
}
MODES = _MODEINFO.keys()
MODES.sort()
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16B")
##
# Get "base" mode. Given a mode, this function returns "L" for
# images that contain grayscale data, and "RGB" for images that
# contain color data.
#
# @param mode Input mode.
# @return "L" or "RGB".
# @exception KeyError The input mode was not a standard mode.
def getmodebase(mode):
# corresponding "base" mode (grayscale or colour)
return _MODEINFO[mode][0]
##
# Get storage type mode. Given a mode, this function returns a
# single-layer mode suitable for storing individual bands.
#
# @param mode Input mode.
# @return "L", "I", or "F".
# @exception KeyError The input mode was not a standard mode.
def getmodetype(mode):
# storage type (per band)
return _MODEINFO[mode][1]
##
# Get list of individual band names. Given a mode, this function
# returns a tuple containing the names of individual bands (use
# <b>getmodetype</b> to get the mode used to store each individual
# band).
#
# @param mode Input mode.
# @return A tuple containing band names. The length of the tuple
# gives the number of bands in an image of the given mode.
# @exception KeyError The input mode was not a standard mode.
def getmodebands(mode):
# return list of subcomponents
return len(_MODEINFO[mode][2])
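# Example (added for illustration; not part of the original source):
#
#   >>> getmodebase("RGBA"), getmodetype("I"), getmodebands("CMYK")
#   ('RGB', 'I', 4)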
# --------------------------------------------------------------------
# Helpers
_initialized = 0
##
# Explicitly load standard file format drivers.
def preinit():
"Load standard file format drivers."
global _initialized
if _initialized >= 1:
return
for m in ("Bmp", "Gif", "Jpeg", "Ppm", "Png", "Tiff"):
try:
__import__("%sImagePlugin" % m, globals(), locals(), [])
except ImportError:
pass # ignore missing driver for now
_initialized = 1
##
# Explicitly load all available file format drivers.
def init():
"Load all file format drivers."
global _initialized
if _initialized >= 2:
return
visited = {}
directories = sys.path
try:
directories = directories + [os.path.dirname(__file__)]
except NameError:
pass
# only check directories (including current, if present in the path)
for directory in filter(isDirectory, directories):
fullpath = os.path.abspath(directory)
if visited.has_key(fullpath):
continue
for file in os.listdir(directory):
if file[-14:] == "ImagePlugin.py":
f, e = os.path.splitext(file)
try:
sys.path.insert(0, directory)
try:
__import__(f, globals(), locals(), [])
finally:
del sys.path[0]
except ImportError:
if DEBUG:
print "Image: failed to import",
print f, ":", sys.exc_value
visited[fullpath] = None
if OPEN or SAVE:
_initialized = 2
# --------------------------------------------------------------------
# Codec factories (used by tostring/fromstring and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print decoder, (mode,) + args + extra
return apply(decoder, (mode,) + args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print encoder, (mode,) + args + extra
return apply(encoder, (mode,) + args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
class _E:
def __init__(self, data): self.data = data
def __coerce__(self, other): return self, _E(other)
def __add__(self, other): return _E((self.data, "__add__", other.data))
def __mul__(self, other): return _E((self.data, "__mul__", other.data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isNumberType(c)):
return c, 0.0
if (a is stub and b == "__add__" and isNumberType(c)):
return 1.0, c
except TypeError: pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isNumberType(c) and
d == "__add__" and isNumberType(e)):
return c, e
except TypeError: pass
raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
##
# This class represents an image object. To create Image objects, use
# the appropriate factory functions. There's hardly ever any reason
# to call the Image constructor directly.
#
# @see #open
# @see #new
# @see #fromstring
class Image:
format = None
format_description = None
def __init__(self):
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
new.palette = self.palette
if im.mode == "P":
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
def _copy(self):
self.load()
self.im = self.im.copy()
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile
if not file:
file = tempfile.mktemp()
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
file = file + "." + format
self.save(file, format)
return file
##
# Returns a string containing pixel data.
#
# @param encoder_name What encoder to use. The default is to
# use the standard "raw" encoder.
# @param *args Extra arguments to the encoder.
# @return An 8-bit string.
def tostring(self, encoder_name="raw", *args):
"Return image as a binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
data = []
while 1:
l, s, d = e.encode(65536)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tostring" % s)
return string.join(data, "")
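    # Example (added for illustration; not part of the original source):
    #
    #   data = im.tostring()              # raw data in the image's own mode
    #   data = im.tostring("raw", "RGB")  # explicit raw mode argument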
##
# Returns the image converted to an X11 bitmap. This method
# only works for mode "1" images.
#
# @param name The name prefix to use for the bitmap variables.
# @return A string containing an X11 bitmap.
# @exception ValueError If the mode is not "1"
def tobitmap(self, name="image"):
"Return image as an XBM bitmap"
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tostring("xbm")
return string.join(["#define %s_width %d\n" % (name, self.size[0]),
"#define %s_height %d\n"% (name, self.size[1]),
"static char %s_bits[] = {\n" % name, data, "};"], "")
##
# Same as the <b>fromstring</b> function, but loads data
# into the current image.
def fromstring(self, data, decoder_name="raw", *args):
"Load data to image from binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
##
# Allocates storage for the image and loads the pixel data. In
# normal cases, you don't need to call this method, since the
# Image class automatically loads an opened image when it is
# accessed for the first time.
def load(self):
"Explicitly load pixel data."
if self.im and self.palette and self.palette.dirty:
# realize palette
apply(self.im.putpalette, self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if self.info.has_key("transparency"):
self.im.putpalettealpha(self.info["transparency"], 0)
self.palette.mode = "RGBA"
##
# Verify file contents. For data read from a file, this method
# attempts to determine if the file is broken, without actually
# decoding the image data. If this method finds any problems, it
# raises suitable exceptions. If you need to load the image after
# using this method, you must reopen the image file.
def verify(self):
"Verify file contents."
pass
##
# Returns a converted copy of an image. For the "P" mode, this
# translates pixels through the palette. If mode is omitted, a
# mode is chosen so that all information in the image and the
# palette can be represented without a palette.
# <p>
# The current version supports all possible conversions between
# "L", "RGB" and "CMYK."
# <p>
# When translating a colour image to black and white (mode "L"),
# the library uses the ITU-R 601-2 luma transform:
# <p>
# <b>L = R * 299/1000 + G * 587/1000 + B * 114/1000</b>
# <p>
# When translating a greyscale image into a bilevel image (mode
# "1"), all non-zero values are set to 255 (white). To use other
# thresholds, use the <b>point</b> method.
#
# @def convert(mode, matrix=None)
# @param mode The requested mode.
# @param matrix An optional conversion matrix. If given, this
# should be 4- or 16-tuple containing floating point values.
# @return An Image object.
def convert(self, mode=None, data=None, dither=None,
palette=WEB, colors=256):
"Convert to other pixel format"
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if data:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, data)
return self._new(im)
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
return self._new(im)
# colourspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
return self._new(im)
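    # Worked example (added; the pixel value is illustrative): converting the
    # RGB pixel (255, 128, 0) to mode "L" with the ITU-R 601-2 weights above
    # gives 255*0.299 + 128*0.587 + 0*0.114 ~= 151.4, stored as 151.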
def quantize(self, colors=256, method=0, kmeans=0, palette=None):
# methods:
# 0 = median cut
# 1 = maximum coverage
# NOTE: this functionality will be moved to the extended
# quantizer interface in a later versions of PIL.
self.load()
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
##
# Copies the image. Use this method if you wish to paste things
# into an image, but still retain the original.
#
# @return An Image object.
def copy(self):
"Copy raster data"
self.load()
im = self.im.copy()
return self._new(im)
##
# Returns a rectangular region from the current image. The box is
# a 4-tuple defining the left, upper, right, and lower pixel
# coordinate.
# <p>
# This is a lazy operation. Changes to the source image may or
# may not be reflected in the cropped image. To break the
# connection, call the <b>load</b> method on the cropped copy.
#
# @return An Image object.
def crop(self, box=None):
"Crop region from image"
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
##
# Configures the image file loader so it returns a version of the
# image that as closely as possible matches the given mode and
# size. For example, you can use this method to convert a colour
# JPEG to greyscale while loading it, or to extract a 128x192
# version from a PCD file.
# <p>
# Note that this method modifies the Image object in place. If
# the image has already been loaded, this method has no effect.
#
# @param mode The requested mode.
# @param size The requested size.
def draft(self, mode, size):
"Configure image decoder"
pass
##
# Filter image by the given filter. For a list of available
# filters, see the <b>ImageFilter</b> module.
#
# @param filter Filter kernel.
# @return An Image object.
# @see ImageFilter
def filter(self, filter):
"Apply environment filter to image"
self.load()
from ImageFilter import Filter
if not isinstance(filter, Filter):
filter = filter()
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
##
# Returns a tuple containing the name of each band. For example,
# <b>getbands</b> on an RGB image returns ("R", "G", "B").
#
# @return A tuple containing band names.
def getbands(self):
"Get band names"
return _MODEINFO[self.mode][2]
##
# Calculates the bounding box of the non-zero regions in the
# image.
# @return The bounding box is returned as a 4-tuple defining the
# left, upper, right, and lower pixel coordinate. If the image
# is completely empty, this method returns None.
def getbbox(self):
"Get bounding box of actual data (non-zero pixels) in image"
self.load()
return self.im.getbbox()
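    # Example (added for illustration): trimming an image to its non-zero
    # region is typically written as im.crop(im.getbbox()); note that getbbox()
    # returns None for an entirely empty image, so check the result first.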
##
# Returns the contents of an image as a sequence object containing
# pixel values. The sequence object is flattened, so that values
# for line one follow directly after the values of line zero, and
# so on.
# <p>
# Note that the sequence object returned by this method is an
# internal PIL data type, which only supports certain sequence
# operations. To convert it to an ordinary sequence (e.g. for
# printing), use <b>list(im.getdata())</b>.
#
# @param band What band to return. The default is to return
# all bands. To return a single band, pass in the index
# value (e.g. 0 to get the "R" band from an "RGB" image).
# @return A sequence-like object.
def getdata(self, band = None):
"Get image data as sequence object."
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
##
    # Get the minimum and maximum pixel values for each band in
# the image.
#
# @return For a single-band image, a 2-tuple containing the
# minimum and maximum pixel value. For a multi-band image,
# a tuple containing one 2-tuple for each band.
def getextrema(self):
"Get min/max value"
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
##
# Returns the pixel value at a given position.
#
# @param xy The coordinate, given as (x, y).
# @return The pixel value. If the image is a multi-layer image,
# this method returns a tuple.
def getpixel(self, xy):
"Get pixel value"
self.load()
return self.im.getpixel(xy)
def getprojection(self):
"Get projection to x and y axes"
self.load()
x, y = self.im.getprojection()
return map(ord, x), map(ord, y)
##
# Returns a histogram for the image. The histogram is returned as
# a list of pixel counts, one for each pixel value in the source
# image. If the image has more than one band, the histograms for
# all bands are concatenated (for example, the histogram for an
# "RGB" image contains 768 values).
# <p>
# A bilevel image (mode "1") is treated as a greyscale ("L") image
# by this method.
# <p>
# If a mask is provided, the method returns a histogram for those
# parts of the image where the mask image is non-zero. The mask
# image must have the same size as the image, and be either a
# bi-level image (mode "1") or a greyscale image ("L").
#
# @def histogram(mask=None)
# @param mask An optional mask.
# @return A list containing pixel counts.
def histogram(self, mask=None, extrema=None):
"Take histogram of image"
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
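    # Example (added for illustration): len(im.histogram()) is 768 for an "RGB"
    # image (256 counts per band) and 256 for an "L" image; passing a mask of
    # the same size restricts the counts to the masked region.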
##
# (Deprecated) Returns a copy of the image where the data has been
# offset by the given distances. Data wraps around the edges. If
# yoffset is omitted, it is assumed to be equal to xoffset.
# <p>
# This method is deprecated. New code should use the <b>offset</b>
# function in the <b>ImageChops</b> module.
#
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(self, xoffset, yoffset=None):
"(deprecated) Offset image in horizontal and/or vertical direction"
import ImageChops
return ImageChops.offset(self, xoffset, yoffset)
##
# Pastes another image into this image. The box argument is either
# a 2-tuple giving the upper left corner, a 4-tuple defining the
# left, upper, right, and lower pixel coordinate, or None (same as
# (0, 0)). If a 4-tuple is given, the size of the pasted image
# must match the size of the region.
# <p>
# If the modes don't match, the pasted image is converted to the
# mode of this image (see the <b>convert</b> method for details).
# <p>
# Instead of an image, the source can be a integer or tuple
# containing pixel values. The method then fills the region
# with the given colour. When creating RGB images, you can
# also use colour strings as supported by the ImageColor module.
# <p>
# If a mask is given, this method updates only the regions
# indicated by the mask. You can use either "1", "L" or "RGBA"
# images (in the latter case, the alpha band is used as mask).
# Where the mask is 255, the given image is copied as is. Where
# the mask is 0, the current value is preserved. Intermediate
# values can be used for transparency effects.
# <p>
# Note that if you paste an "RGBA" image, the alpha band is
# ignored. You can work around this by using the same image as
# both source image and mask.
#
# @param im Source image or pixel value (integer or tuple).
# @param box A 4-tuple giving the region to paste into. If a
# 2-tuple is used instead, it's treated as the upper left
# corner. If None is used instead, the source is pasted
# into the upper left corner.
# @param mask An optional mask image.
# @return An Image object.
def paste(self, im, box=None, mask=None):
"Paste other image into region"
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
            # upper left corner given; get size from image or mask
if isImageType(im):
box = box + (box[0]+im.size[0], box[1]+im.size[1])
else:
box = box + (box[0]+mask.size[0], box[1]+mask.size[1])
if isStringType(im):
import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
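    # Example (illustrative sketch; file names and the offset are arbitrary
    # placeholders): pasting an "RGBA" watermark using the same image as the
    # mask, as suggested in the note above.
    #
    #   import Image
    #   base = Image.open("photo.jpg")
    #   mark = Image.open("watermark.png")    # mode "RGBA"
    #   base.paste(mark, (10, 10), mark)      # alpha band is used as mask
    #   base.save("marked.jpg")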
##
# Map image through lookup table or function.
#
# @param lut A lookup table, containing 256 values per band in the
# image. A function can be used instead, it should take a single
# argument. The function is called once for each possible pixel
# value, and the resulting table is applied to all bands of the
# image.
# @param mode Output mode (default is same as input). In the
# current version, this can only be used if the source image
# has mode "L" or "P", and the output has mode "1".
# @return An Image object.
def point(self, lut, mode=None):
"Map image through lookup table"
if self.mode in ("I", "I;16", "F"):
# floating point; lut must be a valid expression
scale, offset = _getscaleoffset(lut)
self.load()
            im = self.im.point_transform(scale, offset)
else:
# integer image; use lut and mode
self.load()
if not isSequenceType(lut):
# if it isn't a list, it should be a function
lut = map(lut, range(256)) * self.im.bands
im = self.im.point(lut, mode)
return self._new(im)
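    # Example (illustrative sketch; the threshold of 128 is an arbitrary
    # choice): converting a greyscale image to bilevel with point().
    #
    #   import Image
    #   im = Image.open("photo.jpg").convert("L")
    #   bw = im.point(lambda v: v >= 128 and 255, "1")   # "L" -> "1"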
##
# Replace the alpha layer in the current image. The image must be
# an "RGBA" image, and the band must be either "L" or "1".
#
# @param im The new alpha layer.
def putalpha(self, im):
"Set alpha layer"
if self.mode != "RGBA" or im.mode not in ("1", "L"):
raise ValueError("illegal image mode")
im.load()
self.load()
if im.mode == "1":
im = im.convert("L")
self.im.putband(im.im, 3)
##
# Copy pixel data to this image. This method copies data from a
# sequence object into the image, starting at the upper left
# corner (0, 0), and continuing until either the image or the
# sequence ends. The scale and offset values are used to adjust
# the sequence values: <b>pixel = value*scale + offset</b>.
#
# @param data A sequence object.
# @param scale An optional scale value. The default is 1.0.
# @param offset An optional offset value. The default is 0.0.
def putdata(self, data, scale=1.0, offset=0.0):
"Put data from a sequence object into an image."
self.load() # hmm...
self.im.putdata(data, scale, offset)
##
# Attach a palette to a "P" or "L" image. The palette sequence
# should contain 768 integer values, where each group of three
# values represent the red, green, and blue values for the
# corresponding pixel index. Instead of an integer sequence, you
# can use an 8-bit string.
#
# @def putpalette(data)
# @param data A palette sequence.
def putpalette(self, data, rawmode="RGB"):
"Put palette data into an image."
self.load()
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
if not isStringType(data):
data = string.join(map(chr, data), "")
self.mode = "P"
self.palette = ImagePalette.raw(rawmode, data)
self.palette.mode = "RGB"
self.load() # install new palette
##
# Modifies the pixel at the given position. The colour is given as
# a single numerical value for single-band images, and a tuple for
# multi-band images.
# <p>
# Note that this method is relatively slow. For more extensive
# changes, use <b>paste</b> or the <b>ImageDraw</b> module
# instead.
#
# @param xy The pixel coordinate, given as (x, y).
# @param value The pixel value.
# @see #Image.paste
# @see #Image.putdata
# @see ImageDraw
def putpixel(self, xy, value):
"Set pixel value"
self.load()
return self.im.putpixel(xy, value)
##
# Returns a resized copy of an image.
#
# @def resize(size, filter=NEAREST)
# @param size The requested size in pixels, as a 2-tuple:
# (width, height).
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment), or
# <b>ANTIALIAS</b> (a high-quality downsampling filter).
# If omitted, or if the image has mode "1" or "P", it is
# set <b>NEAREST</b>.
# @return An Image object.
def resize(self, size, resample=NEAREST):
"Resize image"
if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
if resample == ANTIALIAS:
# requires stretch support (imToolkit & PIL 1.1.3)
try:
im = self.im.stretch(size, resample)
except AttributeError:
raise ValueError("unsupported resampling filter")
else:
im = self.im.resize(size, resample)
return self._new(im)
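    # Example (illustrative sketch; the file name and target size are
    # placeholders): high-quality downscaling with the ANTIALIAS filter.
    #
    #   import Image
    #   im = Image.open("photo.jpg")
    #   small = im.resize((640, 480), Image.ANTIALIAS)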
##
# Returns a rotated image. This method returns a copy of an
# image, rotated the given number of degrees counter clockwise
# around its centre.
#
# @def rotate(angle, filter=NEAREST)
# @param angle In degrees counter clockwise.
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment).
# If omitted, or if the image has mode "1" or "P", it is
# set <b>NEAREST</b>.
# @return An Image object.
def rotate(self, angle, resample=NEAREST):
"Rotate image. Angle given as degrees counter-clockwise."
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample))
##
# Saves the image under the given filename. If no format is
# specified, the format to use is determined from the filename
# extension, if possible.
# <p>
# Keyword options can be used to provide additional instructions
# to the writer. If a writer doesn't recognise an option, it is
# silently ignored. The available options are described later in
# this handbook.
# <p>
# You can use a file object instead of a filename. In this case,
# you must always specify the format. The file object must
# implement the <b>seek</b>, <b>tell</b>, and <b>write</b>
# methods, and be opened in binary mode.
#
# @def save(file, format=None, **options)
# @param file File name or file object.
# @param format Optional format override. If omitted, the
# format to use is determined from the filename extension.
# If a file object was used instead of a filename, this
# parameter should always be used.
# @param **options Extra parameters to the image writer.
# @return None
def save(self, fp, format=None, **params):
"Save image to file or stream"
if isStringType(fp):
import __builtin__
filename = fp
fp = __builtin__.open(fp, "wb")
close = 1
else:
if hasattr(fp, "name") and isStringType(fp.name):
filename = fp.name
else:
filename = ""
close = 0
self.encoderinfo = params
self.encoderconfig = ()
self.load()
preinit()
ext = string.lower(os.path.splitext(filename)[1])
try:
if not format:
format = EXTENSION[ext]
SAVE[string.upper(format)](self, fp, filename)
except KeyError, v:
init()
if not format:
format = EXTENSION[ext]
SAVE[string.upper(format)](self, fp, filename)
if close:
fp.close()
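    # Example (illustrative sketch; the file name and quality value are
    # placeholders): saving to a file object, which requires an explicit
    # format as described above.
    #
    #   import Image, StringIO
    #   im = Image.open("photo.jpg")
    #   buf = StringIO.StringIO()
    #   im.save(buf, "JPEG", quality=85)      # options go to the writer
    #   data = buf.getvalue()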
##
# Seeks to the given frame in a sequence file. If you seek beyond
# the end of the sequence, the method raises an <b>EOFError</b>
# exception. When a sequence file is opened, the library
# automatically seeks to frame 0.
# <p>
# Note that in the current version of the library, most sequence
    # formats only allow you to seek to the next frame.
#
# @param frame Frame number, starting at 0.
# @exception EOFError Attempt to seek beyond the end of the sequence.
# @see #Image.tell
def seek(self, frame):
"Seek to given frame in sequence file"
# overridden by file handlers
if frame != 0:
raise EOFError
##
# Displays an image. This method is mainly intended for
# debugging purposes.
# <p>
# On Unix platforms, this method saves the image to a temporary
# PPM file, and calls the <b>xv</b> utility.
# <p>
# On Windows, it saves the image to a temporary BMP file, and uses
# the standard BMP display utility to show it (usually Paint).
#
# @def show(title=None)
# @param title Optional title to use for the image window,
# where possible.
def show(self, title=None, command=None):
"Display image (for debug purposes only)"
try:
import ImageTk
ImageTk._show(self, title)
# note: caller must enter mainloop!
except:
_showxv(self, title, command)
##
# Split image into individual bands. This methods returns a tuple
# of individual image bands from an image. For example, splitting
# an "RGB" image creates three new images each containing a copy
# of one of the original bands (red, green, blue).
#
# @return A tuple containing bands.
def split(self):
"Split image into bands"
ims = []
self.load()
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
##
# Returns the current frame number.
#
# @return Frame number, starting with 0.
# @see #Image.seek
def tell(self):
"Return current frame number"
return 0
##
# Make thumbnail. This method modifies the image to contain a
# thumbnail version of itself, no larger than the given size.
# This method calculates an appropriate thumbnail size to preserve
# the aspect of the image, calls the <b>draft</b> method to
# configure the file reader (where applicable), and finally
# resizes the image.
# <p>
# Note that the bilinear and bicubic filters in the current
# version of PIL are not well-suited for thumbnail generation.
# You should use <b>ANTIALIAS</b> unless speed is much more
# important than quality.
# <p>
# Also note that this function modifies the Image object in place.
# If you need to use the full resolution image as well, apply this
# method to a <b>copy</b> of the original image.
#
# @param size Requested size.
# @param resample Optional resampling filter. This can be one
# of <b>NEAREST</b>, <b>BILINEAR</b>, <b>BICUBIC</b>, or
# <b>ANTIALIAS</b> (best quality). If omitted, it defaults
# to <b>NEAREST</b> (this will be changed to ANTIALIAS in
# future versions).
# @return None
def thumbnail(self, size, resample=NEAREST):
"Create thumbnail representation (modifies image in place)"
# FIXME: the default resampling filter will be changed
# to ANTIALIAS in future versions
# preserve aspect ratio
x, y = self.size
if x > size[0]: y = y * size[0] / x; x = size[0]
if y > size[1]: x = x * size[1] / y; y = size[1]
size = x, y
if size == self.size:
return
self.draft(None, size)
self.load()
try:
im = self.resize(size, resample)
except ValueError:
if resample != ANTIALIAS:
raise
im = self.resize(size, NEAREST) # fallback
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
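    # Example (illustrative sketch; file names and size are placeholders):
    # thumbnail() works in place, so work on a copy if the full-resolution
    # image is still needed.
    #
    #   import Image
    #   im = Image.open("photo.jpg")
    #   thumb = im.copy()
    #   thumb.thumbnail((128, 128), Image.ANTIALIAS)
    #   thumb.save("photo.thumb.jpg")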
    # FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
##
# Transform image. This method creates a new image with the
# given size, and the same mode as the original, and copies
# data to the new image using the given transform.
# <p>
# @def transform(size, method, data, resample=NEAREST)
# @param size The output size.
# @param method The transformation method. This is one of
# <b>EXTENT</b> (cut out a rectangular subregion), <b>AFFINE</b>
# (affine transform), <b>QUAD</b> (map a quadrilateral to a
# rectangle), or <b>MESH</b> (map a number of source quadrilaterals
# in one operation).
# @param data Extra data to the transformation method.
# @param resample Optional resampling filter. It can be one of
# <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or
# <b>BICUBIC</b> (cubic spline interpolation in a 4x4
# environment). If omitted, or if the image has mode
# "1" or "P", it is set to <b>NEAREST</b>.
# @return An Image object.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"Transform image"
import ImageTransform
if isinstance(method, ImageTransform.Transform):
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
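    # Example (illustrative sketch; the coordinates and output size are
    # arbitrary placeholders): an EXTENT transform that cuts out a region
    # and scales it to the output size in one step.
    #
    #   import Image
    #   im = Image.open("photo.jpg")
    #   detail = im.transform((200, 200), Image.EXTENT, (10, 10, 110, 110),
    #                         Image.BILINEAR)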
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
"Transform into current image"
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8]
x0, y0 = nw; As = 1.0 / w; At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
##
# Returns a flipped or rotated copy of an image.
#
# @param method One of <b>FLIP_LEFT_RIGHT</b>, <b>FLIP_TOP_BOTTOM</b>,
# <b>ROTATE_90</b>, <b>ROTATE_180</b>, or <b>ROTATE_270</b>.
def transpose(self, method):
"Transpose image (flip or rotate in 90 degree steps)"
self.load()
im = self.im.transpose(method)
return self._new(im)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
self.mode = im.mode
self.size = box[2]-box[0], box[3]-box[1]
self.__crop = box
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
##
# Creates a new image with the given mode and size.
#
# @param mode The mode to use for the new image.
# @param size A 2-tuple, containing (width, height)
# @param color What colour to use for the image. Default is black.
# If given, this should be a single integer or floating point value
# for single-band modes, and a tuple for multi-band modes (one value
# per band). When creating RGB images, you can also use colour
# strings as supported by the ImageColor module. If the colour is
# None, the image is not initialised.
# @return An Image object.
def new(mode, size, color=0):
"Create a new image"
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
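# Example (illustrative sketch; sizes and colours are arbitrary
# placeholders): creating images with new().
#
#   import Image
#   black = Image.new("L", (64, 64))              # initialised to 0
#   white = Image.new("RGB", (64, 64), "white")   # CSS3-style colour string
#   blank = Image.new("RGB", (64, 64), None)      # not initialised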
##
# Creates an image memory from pixel data in a string.
# <p>
# In its simplest form, this function takes three arguments
# (mode, size, and unpacked pixel data).
# <p>
# You can also use any pixel decoder supported by PIL. For more
# information on available decoders, see the section <a
# href="decoder"><i>Writing Your Own File Decoder</i></a>.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a <b>StringIO</b>
# object, and use <b>open</b> to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string containing raw data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def fromstring(mode, size, data, decoder_name="raw", *args):
"Load image from string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.fromstring(data, decoder_name, args)
return im
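# Example (illustrative sketch; the file name is a placeholder): a raw-mode
# round trip using tostring() and fromstring().
#
#   import Image
#   im = Image.open("photo.jpg").convert("RGB")
#   data = im.tostring()                          # raw RGB bytes
#   copy = Image.fromstring("RGB", im.size, data)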
##
# Creates an image memory from pixel data in a string or byte buffer.
# <p>
# This function is similar to <b>fromstring</b>, but uses data in
# the byte buffer, where possible. Images created by this function
# are usually marked as readonly.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a <b>StringIO</b>
# object, and use <b>open</b> to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string or other buffer object containing raw
# data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def frombuffer(mode, size, data, decoder_name="raw", *args):
"Load image from string or buffer"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw":
if args == ():
args = mode, 0, -1
if args[0] in _MAPMODES:
im = new(mode, (1,1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return apply(fromstring, (mode, size, data, decoder_name, args))
##
# Opens and identifies the given image file.
# <p>
# This is a lazy operation; this function identifies the file, but the
# actual image data is not read from the file until you try to process
# the data (or call the <b>load</b> method).
#
# @def open(file, mode="r")
# @param file A filename (string) or a file object. The file object
# must implement <b>read</b>, <b>seek</b>, and <b>tell</b> methods,
# and be opened in binary mode.
# @param mode The mode. If given, this argument must be "r".
# @return An Image object.
# @exception IOError If the file cannot be found, or the image cannot be
# opened and identified.
# @see #new
def open(fp, mode="r"):
"Open an image file, without loading the raster data"
if mode != "r":
raise ValueError("bad mode")
if isStringType(fp):
import __builtin__
filename = fp
fp = __builtin__.open(fp, "rb")
else:
filename = ""
prefix = fp.read(16)
preinit()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
init()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
raise IOError("cannot identify image file")
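# Example (illustrative sketch; the file name is a placeholder): open() only
# identifies the file; the raster data is read when load() is called or the
# image is first processed.
#
#   import Image
#   im = Image.open("photo.jpg")
#   print im.format, im.mode, im.size             # available immediately
#   im.load()                                     # forces the pixel data in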
#
# Image processing.
##
# Creates a new image by interpolating between the given images, using
# a constant alpha.
#
# <pre>
# out = image1 * (1.0 - alpha) + image2 * alpha
# </pre>
#
# @param im1 The first image.
# @param im2 The second image. Must have the same mode and size as
# the first image.
# @param alpha The interpolation alpha factor. If alpha is 0.0, a
# copy of the first image is returned. If alpha is 1.0, a copy of
# the second image is returned. There are no restrictions on the
# alpha value. If necessary, the result is clipped to fit into
# the allowed output range.
# @return An Image object.
def blend(im1, im2, alpha):
"Interpolate between images."
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
##
# Creates a new image by interpolating between the given images,
# using the mask as alpha.
#
# @param image1 The first image.
# @param image2 The second image. Must have the same mode and
# size as the first image.
# @param mask A mask image. This image can have mode
#    "1", "L", or "RGBA", and must have the same size as the
# other two images.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
image = image2.copy()
image.paste(image1, None, mask)
return image
##
# Applies the function (which should take one argument) to each pixel
# in the given image. If the image has more than one band, the same
# function is applied to each band. Note that the function is
# evaluated once for each possible pixel value, so you cannot use
# random components or other generators.
#
# @def eval(image, function)
# @param image The input image.
# @param function A function object, taking one integer argument.
# @return An Image object.
def eval(image, *args):
"Evaluate image expression"
return image.point(args[0])
##
# Creates a new image from a number of single-band images.
#
# @param mode The mode to use for the output image.
# @param bands A sequence containing one single-band image for
# each band in the output image. All bands must have the
# same size.
# @return An Image object.
def merge(mode, bands):
"Merge a set of single band images into a new multiband image."
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
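# Example (illustrative sketch; the file name is a placeholder): swapping
# the red and blue channels by combining split() and merge().
#
#   import Image
#   r, g, b = Image.open("photo.jpg").split()
#   swapped = Image.merge("RGB", (b, g, r))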
# --------------------------------------------------------------------
# Plugin registry
##
# Register an image file plugin. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param factory An image file factory method.
# @param accept An optional function that can be used to quickly
# reject images having another format.
def register_open(id, factory, accept=None):
id = string.upper(id)
ID.append(id)
OPEN[id] = factory, accept
##
# Register an image MIME type. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param mimetype The image MIME type for this format.
def register_mime(id, mimetype):
MIME[string.upper(id)] = mimetype
##
# Register an image save function. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param driver A function to save images in this format.
def register_save(id, driver):
SAVE[string.upper(id)] = driver
##
# Register an image extension. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param extension An extension used for this format.
def register_extension(id, extension):
EXTENSION[string.lower(extension)] = string.upper(id)
# --------------------------------------------------------------------
# Simple display support
def _showxv(self, title=None, command=None):
if os.name == "nt":
format = "BMP"
if not command:
command = "start"
elif os.environ.get("OSTYPE") == "darwin":
format = "JPEG"
if not command:
command = "open -a /Applications/Preview.app"
else:
format = None
if not command:
command = "xv"
if title:
command = command + " -name \"%s\"" % title
if self.mode == "I;16":
# @PIL88 @PIL101
# "I;16" isn't an 'official' mode, but we still want to
# provide a simple way to show 16-bit images.
base = "L"
else:
base = getmodebase(self.mode)
if base != self.mode and self.mode != "1":
file = self.convert(base)._dump(format=format)
else:
file = self._dump(format=format)
if os.name == "nt":
os.system("%s %s" % (command, file))
# FIXME: this leaves temporary files around...
elif os.environ.get("OSTYPE") == "darwin":
        # on darwin, open returns immediately, so the temp file would be
        # removed while the app is still opening; hence the sleep before rm
os.system("(%s %s; sleep 20; rm -f %s)&" % (command, file, file))
else:
os.system("(%s %s; rm -f %s)&" % (command, file, file))
| gpl-2.0 | 5,808,634,772,621,116,000 | 30.302459 | 77 | 0.591876 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/build/scrapy/build/lib.linux-x86_64-2.7/scrapy/commands/deploy.py | 15 | 8793 | from __future__ import print_function
import sys
import os
import glob
import tempfile
import shutil
import time
import urllib2
import netrc
import json
from urlparse import urlparse, urljoin
from subprocess import Popen, PIPE, check_call
from w3lib.form import encode_multipart
from scrapy.command import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.http import basic_auth_header
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.conf import get_config, closest_scrapy_cfg
_SETUP_PY_TEMPLATE = \
"""# Automatically created by: scrapy deploy
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = %(settings)s']},
)
"""
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return "[options] [ [target] | -l | -L <target> ]"
def short_desc(self):
return "Deploy project in Scrapyd target"
def long_desc(self):
return "Deploy the current project into the given Scrapyd server " \
"(known as target)"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-p", "--project",
help="the project name in the target")
parser.add_option("-v", "--version",
help="the version to deploy. Defaults to current timestamp")
parser.add_option("-l", "--list-targets", action="store_true", \
help="list available targets")
parser.add_option("-d", "--debug", action="store_true",
help="debug mode (do not remove build dir)")
parser.add_option("-L", "--list-projects", metavar="TARGET", \
help="list available projects on TARGET")
parser.add_option("--egg", metavar="FILE",
help="use the given egg, instead of building it")
parser.add_option("--build-egg", metavar="FILE",
help="only build the egg, don't deploy it")
def run(self, args, opts):
try:
import setuptools
except ImportError:
raise UsageError("setuptools not installed")
urllib2.install_opener(urllib2.build_opener(HTTPRedirectHandler))
if opts.list_targets:
for name, target in _get_targets().items():
print("%-20s %s" % (name, target['url']))
return
if opts.list_projects:
target = _get_target(opts.list_projects)
req = urllib2.Request(_url(target, 'listprojects.json'))
_add_auth_header(req, target)
f = urllib2.urlopen(req)
projects = json.loads(f.read())['projects']
print(os.linesep.join(projects))
return
tmpdir = None
if opts.build_egg: # build egg only
egg, tmpdir = _build_egg()
_log("Writing egg to %s" % opts.build_egg)
shutil.copyfile(egg, opts.build_egg)
        else: # build egg and deploy
target_name = _get_target_name(args)
target = _get_target(target_name)
project = _get_project(target, opts)
version = _get_version(target, opts)
if opts.egg:
_log("Using egg: %s" % opts.egg)
egg = opts.egg
else:
_log("Packing version %s" % version)
egg, tmpdir = _build_egg()
if not _upload_egg(target, egg, project, version):
self.exitcode = 1
if tmpdir:
if opts.debug:
_log("Output dir not removed: %s" % tmpdir)
else:
shutil.rmtree(tmpdir)
def _log(message):
sys.stderr.write(message + os.linesep)
def _get_target_name(args):
if len(args) > 1:
raise UsageError("Too many arguments: %s" % ' '.join(args))
elif args:
return args[0]
elif len(args) < 1:
return 'default'
def _get_project(target, opts):
project = opts.project or target.get('project')
if not project:
raise UsageError("Missing project")
return project
def _get_option(section, option, default=None):
cfg = get_config()
return cfg.get(section, option) if cfg.has_option(section, option) \
else default
def _get_targets():
cfg = get_config()
baset = dict(cfg.items('deploy')) if cfg.has_section('deploy') else {}
targets = {}
if 'url' in baset:
targets['default'] = baset
for x in cfg.sections():
if x.startswith('deploy:'):
t = baset.copy()
t.update(cfg.items(x))
targets[x[7:]] = t
return targets
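# A minimal scrapy.cfg sketch for the targets read above (the URLs, project
# name and credentials are placeholders):
#
#   [deploy]
#   url = http://localhost:6800/
#   project = myproject
#
#   [deploy:production]
#   url = http://scrapyd.example.com:6800/
#   project = myproject
#   username = deploy
#   password = secret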
def _get_target(name):
try:
return _get_targets()[name]
except KeyError:
raise UsageError("Unknown target: %s" % name)
def _url(target, action):
return urljoin(target['url'], action)
def _get_version(target, opts):
version = opts.version or target.get('version')
if version == 'HG':
p = Popen(['hg', 'tip', '--template', '{rev}'], stdout=PIPE)
d = 'r%s' % p.communicate()[0]
p = Popen(['hg', 'branch'], stdout=PIPE)
b = p.communicate()[0].strip('\n')
return '%s-%s' % (d, b)
elif version == 'GIT':
p = Popen(['git', 'describe', '--always'], stdout=PIPE)
d = p.communicate()[0].strip('\n')
p = Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=PIPE)
b = p.communicate()[0].strip('\n')
return '%s-%s' % (d, b)
elif version:
return version
else:
return str(int(time.time()))
def _upload_egg(target, eggpath, project, version):
with open(eggpath, 'rb') as f:
eggdata = f.read()
data = {
'project': project,
'version': version,
'egg': ('project.egg', eggdata),
}
body, boundary = encode_multipart(data)
url = _url(target, 'addversion.json')
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(body)),
}
req = urllib2.Request(url, body, headers)
_add_auth_header(req, target)
_log('Deploying to project "%s" in %s' % (project, url))
return _http_post(req)
def _add_auth_header(request, target):
if 'username' in target:
u, p = target.get('username'), target.get('password', '')
request.add_header('Authorization', basic_auth_header(u, p))
else: # try netrc
try:
host = urlparse(target['url']).hostname
a = netrc.netrc().authenticators(host)
request.add_header('Authorization', basic_auth_header(a[0], a[2]))
except (netrc.NetrcParseError, IOError, TypeError):
pass
def _http_post(request):
try:
f = urllib2.urlopen(request)
_log("Server response (%s):" % f.code)
print(f.read())
return True
except urllib2.HTTPError as e:
_log("Deploy failed (%s):" % e.code)
print(e.read())
except urllib2.URLError as e:
_log("Deploy failed: %s" % e)
def _build_egg():
closest = closest_scrapy_cfg()
os.chdir(os.path.dirname(closest))
if not os.path.exists('setup.py'):
settings = get_config().get('settings', 'default')
_create_default_setup_py(settings=settings)
d = tempfile.mkdtemp(prefix="scrapydeploy-")
o = open(os.path.join(d, "stdout"), "wb")
e = open(os.path.join(d, "stderr"), "wb")
retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d], stdout=o, stderr=e)
o.close()
e.close()
egg = glob.glob(os.path.join(d, '*.egg'))[0]
return egg, d
def _create_default_setup_py(**kwargs):
with open('setup.py', 'w') as f:
f.write(_SETUP_PY_TEMPLATE % kwargs)
class HTTPRedirectHandler(urllib2.HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
newurl = newurl.replace(' ', '%20')
if code in (301, 307):
return urllib2.Request(newurl,
data=req.get_data(),
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
elif code in (302, 303):
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
return urllib2.Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
| gpl-2.0 | -4,714,787,654,887,287,000 | 33.081395 | 117 | 0.568293 | false |
Alwnikrotikz/cortex-vfx | test/IECore/CubeColorLookupTest.py | 12 | 2884 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import random
import math
import os
from IECore import *
class GammaOp( ColorTransformOp ) :
def __init__( self, gamma = 1.0 ) :
ColorTransformOp.__init__( self, "applies gamma" )
self.gamma = gamma
def begin( self, operands ) :
pass
def transform( self, color ) :
return Color3f(
math.pow( color.r, 1.0 / self.gamma ),
math.pow( color.g, 1.0 / self.gamma ),
math.pow( color.b, 1.0 / self.gamma )
)
def end( self ) :
pass
class CubeColorLookupTest( unittest.TestCase ) :
def testOpConstruction( self ) :
gammaOp = GammaOp( 2.0 )
dim = V3i( 48, 66, 101 )
cubeLookup = CubeColorLookupf( dim, gammaOp )
random.seed( 23 )
# Perform 100 random comparisons with the LUT against the original function
for i in range( 0, 100 ) :
c = Color3f( random.random(), random.random(), random.random() )
c1 = cubeLookup( c )
c2 = gammaOp.transform( c )
self.assertAlmostEqual( c1.r, c2.r, 1 )
self.assertAlmostEqual( c1.g, c2.g, 1 )
self.assertAlmostEqual( c1.b, c2.b, 1 )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -154,930,911,467,281,180 | 30.010753 | 77 | 0.673024 | false |
haad/ansible | lib/ansible/modules/identity/ipa/ipa_hbacrule.py | 134 | 13226 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_hbacrule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA HBAC rule
description:
- Add, modify or delete an IPA HBAC rule using IPA API.
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ["name"]
description:
description: Description
host:
description:
- List of host names to assign.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
required: false
hostcategory:
description: Host category
choices: ['all']
hostgroup:
description:
- List of hostgroup names to assign.
    - If an empty list is passed all hostgroups will be removed from the rule.
- If option is omitted hostgroups will not be checked or changed.
service:
description:
- List of service names to assign.
- If an empty list is passed all services will be removed from the rule.
- If option is omitted services will not be checked or changed.
servicecategory:
description: Service category
choices: ['all']
servicegroup:
description:
- List of service group names to assign.
- If an empty list is passed all assigned service groups will be removed from the rule.
- If option is omitted service groups will not be checked or changed.
sourcehost:
description:
- List of source host names to assign.
    - If an empty list is passed all assigned source hosts will be removed from the rule.
- If option is omitted source hosts will not be checked or changed.
sourcehostcategory:
description: Source host category
choices: ['all']
sourcehostgroup:
description:
- List of source host group names to assign.
    - If an empty list is passed all assigned source host groups will be removed from the rule.
- If option is omitted source host groups will not be checked or changed.
state:
description: State to ensure
default: "present"
choices: ["present", "absent", "enabled", "disabled"]
user:
description:
- List of user names to assign.
    - If an empty list is passed all assigned users will be removed from the rule.
- If option is omitted users will not be checked or changed.
usercategory:
description: User category
choices: ['all']
usergroup:
description:
- List of user group names to assign.
    - If an empty list is passed all assigned user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure rule to allow all users to access any host from any host
- ipa_hbacrule:
name: allow_all
description: Allow all users to access any host from any host
hostcategory: all
servicecategory: all
usercategory: all
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule with certain limitations
- ipa_hbacrule:
name: allow_all_developers_access_to_db
description: Allow all developers to access any database from any host
hostgroup:
- db-server
usergroup:
- developers
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule is absent
- ipa_hbacrule:
name: rule_to_be_deleted
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
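# Illustrative extra example (host and credentials are placeholders):
# keep a rule defined but temporarily disabled
- ipa_hbacrule:
    name: allow_all
    state: disabled
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: topsecret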
'''
RETURN = '''
hbacrule:
description: HBAC rule as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class HBACRuleIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
def hbacrule_find(self, name):
return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
def hbacrule_add(self, name, item):
return self._post_json(method='hbacrule_add', name=name, item=item)
def hbacrule_mod(self, name, item):
return self._post_json(method='hbacrule_mod', name=name, item=item)
def hbacrule_del(self, name):
return self._post_json(method='hbacrule_del', name=name)
def hbacrule_add_host(self, name, item):
return self._post_json(method='hbacrule_add_host', name=name, item=item)
def hbacrule_remove_host(self, name, item):
return self._post_json(method='hbacrule_remove_host', name=name, item=item)
def hbacrule_add_service(self, name, item):
return self._post_json(method='hbacrule_add_service', name=name, item=item)
def hbacrule_remove_service(self, name, item):
return self._post_json(method='hbacrule_remove_service', name=name, item=item)
def hbacrule_add_user(self, name, item):
return self._post_json(method='hbacrule_add_user', name=name, item=item)
def hbacrule_remove_user(self, name, item):
return self._post_json(method='hbacrule_remove_user', name=name, item=item)
def hbacrule_add_sourcehost(self, name, item):
return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
def hbacrule_remove_sourcehost(self, name, item):
return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
sourcehostcategory=None,
usercategory=None):
data = {}
if description is not None:
data['description'] = description
if hostcategory is not None:
data['hostcategory'] = hostcategory
if ipaenabledflag is not None:
data['ipaenabledflag'] = ipaenabledflag
if servicecategory is not None:
data['servicecategory'] = servicecategory
if sourcehostcategory is not None:
data['sourcehostcategory'] = sourcehostcategory
if usercategory is not None:
data['usercategory'] = usercategory
return data
def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule):
return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule)
def ensure(module, client):
name = module.params['cn']
state = module.params['state']
if state in ['present', 'enabled']:
ipaenabledflag = 'TRUE'
else:
ipaenabledflag = 'FALSE'
host = module.params['host']
hostcategory = module.params['hostcategory']
hostgroup = module.params['hostgroup']
service = module.params['service']
servicecategory = module.params['servicecategory']
servicegroup = module.params['servicegroup']
sourcehost = module.params['sourcehost']
sourcehostcategory = module.params['sourcehostcategory']
sourcehostgroup = module.params['sourcehostgroup']
user = module.params['user']
usercategory = module.params['usercategory']
usergroup = module.params['usergroup']
module_hbacrule = get_hbacrule_dict(description=module.params['description'],
hostcategory=hostcategory,
ipaenabledflag=ipaenabledflag,
servicecategory=servicecategory,
sourcehostcategory=sourcehostcategory,
usercategory=usercategory)
ipa_hbacrule = client.hbacrule_find(name=name)
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_hbacrule:
changed = True
if not module.check_mode:
ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
else:
diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_hbacrule.get(key)
client.hbacrule_mod(name=name, item=data)
if host is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'host') or changed
if hostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'hostgroup') or changed
if service is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvc') or changed
if servicegroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
servicegroup,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvcgroup') or changed
if sourcehost is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'host') or changed
if sourcehostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'hostgroup') or changed
if user is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'user') or changed
if usergroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'group') or changed
else:
if ipa_hbacrule:
changed = True
if not module.check_mode:
client.hbacrule_del(name=name)
return changed, client.hbacrule_find(name=name)
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
description=dict(type='str'),
host=dict(type='list'),
hostcategory=dict(type='str', choices=['all']),
hostgroup=dict(type='list'),
service=dict(type='list'),
servicecategory=dict(type='str', choices=['all']),
servicegroup=dict(type='list'),
sourcehost=dict(type='list'),
sourcehostcategory=dict(type='str', choices=['all']),
sourcehostgroup=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
user=dict(type='list'),
usercategory=dict(type='str', choices=['all']),
usergroup=dict(type='list'))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True
)
client = HBACRuleIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, hbacrule = ensure(module, client)
module.exit_json(changed=changed, hbacrule=hbacrule)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | -7,225,423,844,489,080,000 | 38.363095 | 121 | 0.606155 | false |
jswope00/griffinx | common/test/acceptance/tests/lms/test_lms_acid_xblock.py | 122 | 5837 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from unittest import expectedFailure
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(UniqueCourseTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
self.setup_fixtures()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def validate_acid_block_view(self, acid_block):
"""
Verify that the LMS view for the Acid Block is correct
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
def validate_acid_parent_block_view(self, acid_parent_block):
super(XBlockAcidChildTest, self).validate_acid_block_view(acid_parent_block)
self.assertTrue(acid_parent_block.child_tests_passed)
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_parent_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid_parent]')
self.validate_acid_parent_block_view(acid_parent_block)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidAsideTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
@expectedFailure
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_aside = AcidView(self.browser, '.xblock_asides-v1-student_view[data-block-type=acid_aside]')
self.validate_acid_aside_view(acid_aside)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
def validate_acid_aside_view(self, acid_aside):
self.validate_acid_block_view(acid_aside)
| agpl-3.0 | 3,285,360,143,309,951,500 | 33.134503 | 105 | 0.599109 | false |
jelugbo/hebs_repo | lms/envs/common.py | 2 | 62020 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
from path import path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "Your Platform Name Here"
CC_MERCHANT_NAME = PLATFORM_NAME
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount"
PLATFORM_TWITTER_ACCOUNT = "@YourPlatformTwitterAccount"
PLATFORM_TWITTER_URL = "https://twitter.com/YourPlatformTwitterAccount"
PLATFORM_MEETUP_URL = "http://www.meetup.com/YourMeetup"
PLATFORM_LINKEDIN_URL = "http://www.linkedin.com/company/YourPlatform"
PLATFORM_GOOGLE_PLUS_URL = "https://plus.google.com/YourGooglePlusAccount/"
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
    # external access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Toggles OAuth2 authentication provider
'ENABLE_OAUTH2_PROVIDER': False,
# Can be turned off if course lists need to be hidden. Effects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# Analytics experiments - shows instructor analytics tab in LMS instructor dashboard.
# Enabling this feature depends on installation of a separate analytics server.
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# Segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
    # Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable legacy instructor dashboard
'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
# Is this an edX-owned domain? (used on instructor dashboard)
'IS_EDX_DOMAIN': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
    # Allow use of the hint management instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for downloads.
'ENABLE_S3_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"],
    # Turn off account locking if failed login attempts exceed a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
    # Toggles the embargo functionality, which enables embargoing for particular courses
'EMBARGO': False,
    # Toggles the embargo site functionality, which enables embargoing for the whole site
'SITE_EMBARGOED': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Show a "Download your certificate" on the Progress page if the lowest
# nonzero grade cutoff is met
'SHOW_PROGRESS_SUCCESS_BUTTON': False,
# Analytics Data API (for active student count)
# Default to false here b/c dev environments won't have the api, will override in aws.py
'ENABLE_ANALYTICS_ACTIVE_COUNT': False,
# When a logged in user goes to the homepage ('/') should the user be
# redirected to the dashboard - this is default Open edX behavior. Set to
# False to not redirect the user
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True,
# Expose Mobile REST API. Note that if you use this, you must also set
# ENABLE_OAUTH2_PROVIDER to True
'ENABLE_MOBILE_REST_API': False,
# Video Abstraction Layer used to allow video teams to manage video assets
# independently of courseware. https://github.com/edx/edx-val
'ENABLE_VIDEO_ABSTRACTION_LAYER_API': False,
# Enable the new dashboard, account, and profile pages
'ENABLE_NEW_DASHBOARD': False,
}
# Ignore static asset files on import which match this pattern
ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)"
# Used for A/B testing
DEFAULT_GROUPS = []
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
# Used with XQueue
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
DATA_DIR = COURSES_ROOT
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For Node.js
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
# Where to look for a status message
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ OpenID Provider ##################################
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2'
# OpenID Connect claim handlers
OAUTH_OIDC_ID_TOKEN_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicIDTokenHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.IDTokenHandler'
)
OAUTH_OIDC_USERINFO_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicUserInfoHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.UserInfoHandler'
)
################################## EDX WEB #####################################
# This is where we stick our compiled template files. Most of the app uses Mako
# templates
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
# This is where Django Template lookup is defined. There are a few of these
# still left lying around.
TEMPLATE_DIRS = [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
# Added for django-wiki
'django.core.context_processors.media',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Allows the open edX footer to be leveraged in Django Templates.
'edxmako.shortcuts.open_source_footer_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
    # Allows the microsite footer to be leveraged in Django Templates.
'edxmako.shortcuts.microsite_footer_context_processor',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# FIXME:
# We should have separate S3 staged URLs in case we need to make changes to
# these assets and test them.
LIB_URL = '/static/js/'
# Dev machines shouldn't need the book
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login'
LOGIN_URL = EDX_ROOT_URL + '/accounts/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
###
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
# IP addresses that are allowed to reload the course, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
# Note: these intentionally greedily grab all chars up to the next slash including any pluses
# DHM: I really wanted to ensure the separators were the same (+ or /) but all patterns I tried had
# too many inadvertent side effects :-(
COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/]+)'
COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id')
COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':')
USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
USAGE_ID_PATTERN = r'(?P<usage_id>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
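# Illustrative note (added for clarity, not part of the original settings): these
# patterns are meant to accept strings such as "edX/DemoX/Demo_Course" or
# "course-v1:edX+DemoX+Demo_Course" for course keys, and block identifiers like
# "i4x://edX/DemoX/html/introduction" for usage keys.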
############################## EVENT TRACKING #################################
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event']
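# Illustrative note (added for clarity): these are regular expressions anchored at
# the start of the request path, so e.g. "/heartbeat" and "/segmentio/event/..."
# requests are not tracked.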
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
EVENT_TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
TRACKING_SEGMENTIO_WEBHOOK_SECRET = None
TRACKING_SEGMENTIO_ALLOWED_ACTIONS = ['Track', 'Screen']
TRACKING_SEGMENTIO_ALLOWED_CHANNELS = ['mobile']
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = None
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## OPTIMIZELY ###########################
OPTIMIZELY_PROJECT_ID = None
######################## subdomain specific settings ###########################
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############# ModuleStore Configuration ##########
MODULESTORE_BRANCH = 'published-only'
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
},
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
]
}
}
}
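# Illustrative note (an assumption, not part of the original file): with the
# empty 'mappings' dict above, the mixed modulestore simply consults the
# configured stores in the order they are listed (draft, then xml, then split)
# when resolving a course.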
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
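# Illustrative override (a sketch, not part of these defaults): a deployment
# that actually sandboxes submitted code might configure something like
#
# CODE_JAIL = {
#     'python_bin': '/path/to/sandbox-venv/bin/python',  # hypothetical path
#     'user': 'sandbox',
#     'limits': {'CPU': 1},
# }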
# Some courses are allowed to run unsafe code. This is a list of regexes, one
# of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
SESSION_COOKIE_SECURE = False
# CMS base
CMS_BASE = 'localhost:8001'
# Site info
SITE_ID = 1
SITE_NAME = "example.com"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TECH_SUPPORT_EMAIL = '[email protected]'
CONTACT_EMAIL = '[email protected]'
BUGS_EMAIL = '[email protected]'
UNIVERSITY_EMAIL = '[email protected]'
PRESS_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
# these languages display right to left
LANGUAGES_BIDI = ("en@rtl", "he", "ar", "fa", "ur", "fa-ir")
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
('en', u'English'),
('en@rtl', u'English (right-to-left)'),
    ('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('am', u'አማርኛ'), # Amharic
('ar', u'العربية'), # Arabic
('az', u'azərbaycanca'), # Azerbaijani
('bg-bg', u'български (България)'), # Bulgarian (Bulgaria)
('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh)
('bn-in', u'বাংলা (ভারত)'), # Bengali (India)
('bs', u'bosanski'), # Bosnian
('ca', u'Català'), # Catalan
('ca@valencia', u'Català (València)'), # Catalan (Valencia)
('cs', u'Čeština'), # Czech
('cy', u'Cymraeg'), # Welsh
('da', u'dansk'), # Danish
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('el', u'Ελληνικά'), # Greek
('en-uk', u'English (United Kingdom)'), # English (United Kingdom)
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-ar', u'Español (Argentina)'), # Spanish (Argentina)
('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador)
('es-es', u'Español (España)'), # Spanish (Spain)
('es-mx', u'Español (México)'), # Spanish (Mexico)
('es-pe', u'Español (Perú)'), # Spanish (Peru)
('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia)
('eu-es', u'euskara (Espainia)'), # Basque (Spain)
('fa', u'فارسی'), # Persian
('fa-ir', u'فارسی (ایران)'), # Persian (Iran)
('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland)
('fil', u'Filipino'), # Filipino
('fr', u'Français'), # French
('gl', u'Galego'), # Galician
('gu', u'ગુજરાતી'), # Gujarati
('he', u'עברית'), # Hebrew
('hi', u'हिन्दी'), # Hindi
('hr', u'hrvatski'), # Croatian
('hu', u'magyar'), # Hungarian
('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語 (日本)'), # Japanese (Japan)
('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan)
('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia)
('kn', u'ಕನ್ನಡ'), # Kannada
('ko-kr', u'한국어 (대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('ml', u'മലയാളം'), # Malayalam
('mn', u'Монгол хэл'), # Mongolian
('mr', u'मराठी'), # Marathi
('ms', u'Bahasa Melayu'), # Malay
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('ne', u'नेपाली'), # Nepali
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('or', u'ଓଡ଼ିଆ'), # Oriya
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal)
('ro', u'română'), # Romanian
('ru', u'Русский'), # Russian
('si', u'සිංහල'), # Sinhala
('sk', u'Slovenčina'), # Slovak
('sl', u'Slovenščina'), # Slovenian
('sq', u'shqip'), # Albanian
('sr', u'Српски'), # Serbian
('sv', u'svenska'), # Swedish
('sw', u'Kiswahili'), # Swahili
('ta', u'தமிழ்'), # Tamil
('te', u'తెలుగు'), # Telugu
('th', u'ไทย'), # Thai
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
    ('uk', u'Українська'), # Ukrainian
('ur', u'اردو'), # Urdu
('vi', u'Tiếng Việt'), # Vietnamese
('uz', u'Ўзбек'), # Uzbek
('zh-cn', u'中文 (简体)'), # Chinese (China)
('zh-hk', u'中文 (香港)'), # Chinese (Hong Kong)
('zh-tw', u'中文 (台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Guidelines for translators
TRANSLATORS_GUIDE = 'https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst'
#################################### GITHUB #######################################
# gitreload is used in LMS-workflow to pull content from github
# gitreload requests are only allowed from these IP addresses, which are
# the advertised public IPs of the github WebHook servers.
# These are listed, eg at https://github.com/edx/edx-platform/admin/hooks
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, I'm just bumping the expiration time to something absurd (10
# years). This is only used if DEFAULT_FILE_STORAGE is overridden to use S3
# in the global settings.py
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = '[email protected]'
##### Using cybersource by default #####
CC_PROCESSOR_NAME = 'CyberSource'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
},
'CyberSource2': {
"PURCHASE_ENDPOINT": '',
"SECRET_KEY": '',
"ACCESS_KEY": '',
"PROFILE_ID": '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# open ended grading config #####################
# By setting up the default settings with an incorrect user name and password,
# we will get an error when attempting to connect
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
# Used for testing, debugging peer grading
MOCK_PEER_GRADING = False
# Used for testing, debugging staff grading
MOCK_STAFF_GRADING = False
################################# Jasmine ##################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'geoinfo.middleware.CountryMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
# should be after DarkLangMiddleware
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # to redirect unenrolled students to the course info page
'courseware.middleware.RedirectUnenrolledMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation', 'time']
] +
['js/' + pth + '.js' for pth in ['ajax-error']] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
# Before a student accesses courseware, we do not
# need many of the JS dependencies. This includes
# only the dependencies used everywhere in the LMS
# (including the dashboard/account/profile pages)
# Currently, this partially duplicates the "main vendor"
# JavaScript file, so only one of the two should be included
# on a page at any time.
# In the future, we will likely refactor this to use
# RequireJS and an optimizer.
base_vendor_js = [
'js/vendor/jquery.min.js',
'js/vendor/jquery.cookie.js',
'js/vendor/underscore-min.js'
]
main_vendor_js = base_vendor_js + [
'js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/json2.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
'js/vendor/ova/annotator-full-firebase-auth.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
'js/vendor/ova/share-annotator.js',
'js/vendor/ova/richText-annotator.js',
'js/vendor/ova/reply-annotator.js',
'js/vendor/ova/tags-annotator.js',
'js/vendor/ova/flagging-annotator.js',
'js/vendor/ova/diacritic-annotator.js',
'js/vendor/ova/grouping-annotator.js',
'js/vendor/ova/jquery-Watch.js',
'js/vendor/ova/openseadragon.js',
'js/vendor/ova/OpenSeaDragonAnnotation.js',
'js/vendor/ova/ova.js',
'js/vendor/ova/catch/js/catch.js',
'js/vendor/ova/catch/js/handlebars-1.1.2.js',
'js/vendor/URI.min.js',
]
dashboard_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js'))
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js'))
# JavaScript used by the student account and profile pages
# These are not courseware, so they do not need many of the courseware-specific
# JavaScript modules.
student_account_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_account/**/*.js'))
student_profile_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_profile/**/*.js'))
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/application.css',
'sass/ie.css'
],
'output_filename': 'css/lms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/application-extend1.css',
],
'output_filename': 'css/lms-style-app-extend1.css',
},
'style-app-extend2': {
'source_filenames': [
'sass/application-extend2.css',
],
'output_filename': 'css/lms-style-app-extend2.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/application-rtl.css',
'sass/ie-rtl.css'
],
'output_filename': 'css/lms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/application-extend1-rtl.css',
],
'output_filename': 'css/lms-style-app-extend1-rtl.css',
},
'style-app-extend2-rtl': {
'source_filenames': [
'sass/application-extend2-rtl.css',
],
'output_filename': 'css/lms-style-app-extend2-rtl.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'sass/course.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course.css',
},
'style-course-rtl': {
'source_filenames': [
'sass/course-rtl.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-xmodule-annotations.css',
},
}
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
PIPELINE_JS = {
'application': {
        # Application will contain all paths not in courseware_js
'source_filenames': sorted(common_js) + sorted(project_js) + [
'js/form.ext.js',
'js/my_courses_dropdown.js',
'js/toggle_login_modal.js',
'js/sticky_filter.js',
'js/query-params.js',
'js/src/utility.js',
'js/src/accessibility_tools.js',
'js/src/ie_shim.js',
'js/src/string_utils.js',
],
'output_filename': 'js/lms-application.js',
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
},
'base_vendor': {
'source_filenames': base_vendor_js,
'output_filename': 'js/lms-base-vendor.js',
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
},
'staff_grading': {
'source_filenames': staff_grading_js,
'output_filename': 'js/staff_grading.js',
},
'open_ended': {
'source_filenames': open_ended_js,
'output_filename': 'js/open_ended.js',
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
},
'dashboard': {
'source_filenames': dashboard_js,
'output_filename': 'js/dashboard.js'
},
'student_account': {
'source_filenames': student_account_js,
'output_filename': 'js/student_account.js'
},
'student_profile': {
'source_filenames': student_profile_js,
'output_filename': 'js/student_profile.js'
},
}
PIPELINE_DISABLE_WRAPPER = True
# Compile all coffee files in course data directories if they are out of date.
# TODO: Remove this once we move data into Mongo. This is only temporary while
# course data directories are still in use.
if os.path.isdir(DATA_DIR):
for course_dir in os.listdir(DATA_DIR):
js_dir = DATA_DIR / course_dir / "js"
if not os.path.isdir(js_dir):
continue
for filename in os.listdir(js_dir):
if filename.endswith('coffee'):
new_filename = os.path.splitext(filename)[0] + ".js"
if os.path.exists(js_dir / new_filename):
coffee_timestamp = os.stat(js_dir / filename).st_mtime
js_timestamp = os.stat(js_dir / new_filename).st_mtime
if coffee_timestamp <= js_timestamp:
continue
os.system("rm %s" % (js_dir / new_filename))
os.system("coffee -c %s" % (js_dir / filename))
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = "pipeline.compressors.uglifyjs.UglifyJSCompressor"
STATICFILES_IGNORE_PATTERNS = (
"sass/*",
"coffee/*",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs'
# Setting that will only affect the edX version of django-pipeline until our changes are merged upstream
PIPELINE_COMPILE_INPLACE = True
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
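# Illustrative usage (an assumption, not from the original file): a hypothetical
# task could be directed at one of these queues via its routing key, e.g.
#
#   some_task.apply_async(args, routing_key=HIGH_PRIORITY_QUEUE)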
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# Flag to indicate if individual email addresses should be logged as they are sent
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
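# Illustrative arithmetic (added for clarity): with a 0.02 s delay a single
# worker sends at most 1 / 0.02 = 50 messages per second, so ten workers
# sending in parallel stay at roughly 500 messages per second overall.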
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'south',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# Our courseware
'circuit',
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django',
'util',
'certificates',
'dashboard',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
'course_groups',
'bulk_email',
# External auth (OpenID, shib)
'external_auth',
'django_openid_auth',
# OAuth2 Provider
'provider',
'provider.oauth2',
'oauth2_provider',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# Foldit integration
'foldit',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
# Discussion forums
'django_comment_client',
'django_comment_common',
'notes',
# Splash screen
'splash',
# Monitoring
'datadog',
# User API
'rest_framework',
'user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
'notifier_api',
# Different Course Modes
'course_modes',
# Student Identity Verification
'verify_student',
# Dark-launching languages
'dark_lang',
# Microsite configuration
'microsite_configuration',
# Student Identity Reverification
'reverification',
'embargo',
# Monitoring functionality
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
# Country list
'django_countries',
# edX Mobile API
'mobile_api',
)
######################### MARKETING SITE ###############################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about_edx',
'CONTACT': 'contact',
'FAQ': 'help_edx',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy_edx',
'JOBS': 'jobs',
'NEWS': 'news',
'PRESS': 'press',
'BLOG': 'edx-blog',
'DONATE': 'donate',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'city': 'hidden',
'country': 'hidden',
}
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
###################### Grade Downloads ######################
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
######################## PROGRESS SUCCESS BUTTON ##############################
# The following fields are available in the URL: {course_id} {student_id}
PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}'
PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##################### LinkedIn #####################
INSTALLED_APPS += ('django_openid_auth',)
############################ LinkedIn Integration #############################
INSTALLED_APPS += ('linkedin',)
LINKEDIN_API = {
'EMAIL_WHITELIST': [],
'COMPANY_ID': '2746406',
}
############################ ORA 2 ############################################
# By default, don't use a file prefix
ORA2_FILE_PREFIX = None
# Default File Upload Storage bucket and prefix. Used by the FileUpload Service.
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads'
FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments'
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
# Stub for third_party_auth options.
# See common/djangoapps/third_party_auth/settings.py for configuration details.
THIRD_PARTY_AUTH = {}
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Analytics data api settings
ANALYTICS_DATA_URL = ""
ANALYTICS_DATA_TOKEN = ""
ANALYTICS_DASHBOARD_URL = ""
ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights"
# REGISTRATION CODES DISPLAY INFORMATION SUBSTITUTIONS IN THE INVOICE ATTACHMENT
INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration"
INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput directions for people\nbuying registration codes"
# Country code overrides
# Used by django-countries
COUNTRIES_OVERRIDE = {
"TW": _("Taiwan"),
}
| agpl-3.0 | -1,926,681,162,947,610,400 | 32.409978 | 180 | 0.62711 | false |
sachintyagi22/spark | examples/src/main/python/ml/generalized_linear_regression_example.py | 76 | 2506 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.regression import GeneralizedLinearRegression
# $example off$
"""
An example demonstrating generalized linear regression.
Run with:
bin/spark-submit examples/src/main/python/ml/generalized_linear_regression_example.py
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GeneralizedLinearRegressionExample")\
.getOrCreate()
# $example on$
# Load training data
dataset = spark.read.format("libsvm")\
.load("data/mllib/sample_linear_regression_data.txt")
glr = GeneralizedLinearRegression(family="gaussian", link="identity", maxIter=10, regParam=0.3)
# Fit the model
model = glr.fit(dataset)
# Print the coefficients and intercept for generalized linear regression model
print("Coefficients: " + str(model.coefficients))
print("Intercept: " + str(model.intercept))
# Summarize the model over the training set and print out some metrics
summary = model.summary
print("Coefficient Standard Errors: " + str(summary.coefficientStandardErrors))
print("T Values: " + str(summary.tValues))
print("P Values: " + str(summary.pValues))
print("Dispersion: " + str(summary.dispersion))
print("Null Deviance: " + str(summary.nullDeviance))
print("Residual Degree Of Freedom Null: " + str(summary.residualDegreeOfFreedomNull))
print("Deviance: " + str(summary.deviance))
print("Residual Degree Of Freedom: " + str(summary.residualDegreeOfFreedom))
print("AIC: " + str(summary.aic))
print("Deviance Residuals: ")
summary.residuals().show()
# $example off$
spark.stop()
| apache-2.0 | -5,122,695,177,416,111,000 | 36.969697 | 99 | 0.721468 | false |
hitszxp/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 40 | 12814 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
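    # (Note added for clarity.) These counts follow the standard RANSAC trial
    # bound N = ceil(log(1 - p) / log(1 - w ** min_samples)), where p is the
    # requested probability and w = n_inliers / n_samples is the inlier ratio.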
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause | -3,065,065,606,213,625,300 | 36.034682 | 78 | 0.594038 | false |
TheWardoctor/Wardoctors-repo | script.module.fantastic/lib/resources/lib/sources/en/to_be_fixed/needsfixing/crazy.py | 1 | 6406 | # -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['crazy4tv.com', 'crazy4ad.in']
self.base_link = 'http://crazy4tv.com'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
scraper = cfscrape.create_scraper()
r = scraper.get(url).content
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
print posts
items = []
for post in posts:
try:
print post
items += zip(client.parseDOM(post, 'title'), client.parseDOM(post, 'link'))
except:
pass
items = [(i[0], i[1]) for i in items if data['year'] in i[0]]
print items[:1]
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
div = 1 if size.endswith(' GB') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| apache-2.0 | -3,745,853,968,392,189,400 | 36.244186 | 172 | 0.490946 | false |
vipullakhani/mi-instrument | mi/dataset/parser/dosta_ln_auv.py | 8 | 3170 | """
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/dosta_ln_auv.py
@author Jeff Roy
@brief Parser and particle Classes and tools for the dosta_ln_auv data
Release notes:
initial release
"""
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.auv_common import \
AuvCommonParticle, \
AuvCommonParser, \
compute_timestamp
# The structure below is a list of tuples
# Each tuple consists of
# parameter name, index into raw data parts list, encoding function
DOSTA_LN_AUV_PARAM_MAP = [
# message ID is typically index 0
('mission_epoch', 1, int),
('auv_latitude', 2, float),
('auv_longitude', 3, float),
('mission_time', 4, int),
('m_depth', 5, float),
('salinity', 6, float),
('product_number', 7, int),
('serial_number', 8, str),
('estimated_oxygen_concentration', 9, float),
('estimated_oxygen_saturation', 10, float),
('optode_temperature', 11, float),
('calibrated_phase', 12, float),
('blue_phase', 13, float),
('red_phase', 14, float),
('blue_amplitude', 15, float),
('b_pot', 16, float),
('red_amplitude', 17, float),
('raw_temperature', 18, float),
('calculated_oxygen_concentration', 19, float),
('calculated_oxygen_saturation', 20, float),
('external_temperature', 21, float)
]
class DostaLnAuvInstrumentParticle(AuvCommonParticle):
_auv_param_map = DOSTA_LN_AUV_PARAM_MAP
# must provide a parameter map for _build_parsed_values
class DostaLnAuvTelemeteredParticle(DostaLnAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "dosta_ln_auv_instrument"
class DostaLnAuvRecoveredParticle(DostaLnAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "dosta_ln_auv_instrument_recovered"
DOSTA_LN_AUV_ID = '1109' # message ID of dost_ln records
DOSTA_LN_AUV_FIELD_COUNT = 22 # number of expected fields in an dost_ln record
DOSTA_LN_AUV_TELEMETERED_MESSAGE_MAP = [(DOSTA_LN_AUV_ID,
DOSTA_LN_AUV_FIELD_COUNT,
compute_timestamp,
DostaLnAuvTelemeteredParticle)]
DOSTA_LN_AUV_RECOVERED_MESSAGE_MAP = [(DOSTA_LN_AUV_ID,
DOSTA_LN_AUV_FIELD_COUNT,
compute_timestamp,
DostaLnAuvRecoveredParticle)]
class DostaLnAuvParser(AuvCommonParser):
def __init__(self,
stream_handle,
exception_callback,
is_telemetered):
if is_telemetered:
message_map = DOSTA_LN_AUV_TELEMETERED_MESSAGE_MAP
else:
message_map = DOSTA_LN_AUV_RECOVERED_MESSAGE_MAP
# provide message ID and # of fields to parent class
super(DostaLnAuvParser, self).__init__(stream_handle,
exception_callback,
message_map)
| bsd-2-clause | 4,454,281,206,801,478,700 | 28.082569 | 79 | 0.60694 | false |
stelfrich/openmicroscopy | components/tests/ui/library/python/ImageCheckLibrary.py | 14 | 1094 |
import Image
from numpy import asarray
def crop_image(path, cropX, cropY, cropW, cropH):
image = Image.open(path)
x = int(cropX)
y = int(cropY)
x2 = int(cropW) + x
y2 = int(cropH) + y
img = image.crop((x, y, x2, y2))
img.save(path)
def image_should_be_blank(path, expected=True):
image = Image.open(path)
image.save(path) # avoids errors on .split
blank = True
minVals = []
maxVals = []
for channel in image.split():
plane = asarray(channel)
pMin = plane.min()
pMax = plane.max()
minVals.append(pMin)
maxVals.append(pMax)
if pMin != pMax:
blank = False
if expected:
if not blank:
raise AssertionError("Image %s is not blank. min: %s, max: %s"
% (path, minVals, maxVals))
else:
if blank:
raise AssertionError("Image %s is blank. min: %s, max: %s"
% (path, minVals, maxVals))
def image_should_not_be_blank(path):
    image_should_be_blank(path, expected=False)
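# Illustrative usage sketch (added for clarity; not part of the original
# library). The file path and crop box below are assumptions for demonstration.
if __name__ == "__main__":
    demo_path = "screenshot.png"
    crop_image(demo_path, 10, 10, 200, 150)
    image_should_not_be_blank(demo_path)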
| gpl-2.0 | -4,752,580,855,818,549,000 | 22.782609 | 74 | 0.542962 | false |
mrrrgn/AutobahnPython | examples/twisted/wamp/basic/pubsub/complex/backend.py | 8 | 1506 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import random
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import SubscribeOptions
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component that publishes events with no payload
and with complex payloads every second.
"""
@inlineCallbacks
def onJoin(self, details):
counter = 0
while True:
self.publish('com.myapp.heartbeat')
obj = {'counter': counter, 'foo': [1, 2, 3]}
self.publish('com.myapp.topic2', random.randint(0, 100), 23, c = "Hello", d = obj)
counter += 1
yield sleep(1)
| apache-2.0 | 3,080,699,031,500,599,300 | 31.042553 | 91 | 0.630146 | false |
jettisonjoe/openhtf | examples/phase_groups.py | 2 | 4400 | # Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example OpenHTF Phase Groups.
PhaseGroups are used to control phase shortcutting due to terminal errors to
better guarantee when teardown phases run.
"""
import openhtf as htf
def setup_phase(test):
test.logger.info('Setup in a group.')
def main_phase(test):
test.logger.info('This is a main phase.')
def teardown_phase(test):
test.logger.info('Teardown phase.')
def inner_main_phase(test):
test.logger.info('Inner main phase.')
def inner_teardown_phase(test):
test.logger.info('Inner teardown phase.')
def error_setup_phase(test):
test.logger.info('Error in setup phase.')
return htf.PhaseResult.STOP
def error_main_phase(test):
test.logger.info('Error in main phase.')
return htf.PhaseResult.STOP
def run_basic_group():
"""Run the basic phase group example.
In this example, there are no terminal phases; all phases are run.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_setup_error_group():
"""Run the phase group example where an error occurs in a setup phase.
The terminal setup phase shortcuts the test. The main phases are
skipped. The PhaseGroup is not entered, so the teardown phases are also
skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[error_setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_main_error_group():
"""Run the phase group example where an error occurs in a main phase.
The main phase in this example is terminal. The PhaseGroup was entered
because the setup phases ran without error, so the teardown phases are run.
The other main phase is skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[error_main_phase, main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute()
def run_nested_error_groups():
"""Run nested groups example where an error occurs in nested main phase.
In this example, the first main phase in the nested PhaseGroup errors out.
The other inner main phase is skipped, as is the outer main phase. Both
PhaseGroups were entered, so both teardown phases are run.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
error_main_phase, main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute()
def run_nested_error_skip_unentered_groups():
"""Run nested groups example where an error occurs in outer main phase.
Lastly, the first main phase in the outer PhaseGroup errors out. This skips
the nested PhaseGroup and the other outer main phase. The outer PhaseGroup
was entered, so its teardown phase runs.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
error_main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute()
if __name__ == '__main__':
run_basic_group()
run_setup_error_group()
run_main_error_group()
run_nested_groups()
run_nested_error_groups()
run_nested_error_skip_unentered_groups()
| apache-2.0 | 7,505,461,385,569,393,000 | 25.506024 | 80 | 0.672727 | false |
LeeMendelowitz/malign_viz | server/cors.py | 8 | 1654 | from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator | gpl-3.0 | 6,683,719,636,215,468,000 | 35.777778 | 67 | 0.603386 | false |
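# Illustrative usage sketch (added for clarity; not part of the original
# module): applying the decorator to a Flask view. The app and route below
# are assumptions chosen for demonstration only.
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/api/status')
@crossdomain(origin='*', max_age=3600)
def status():
    return jsonify(ok=True)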
hehongliang/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn_common.py | 42 | 12923 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common operations for RNN Estimators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import metrics
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# NOTE(jtbates): As of February 10, 2017, some of the `RNNKeys` have been
# removed and replaced with values from `prediction_key.PredictionKey`. The key
# `RNNKeys.PREDICTIONS_KEY` has been replaced by
# `prediction_key.PredictionKey.SCORES` for regression and
# `prediction_key.PredictionKey.CLASSES` for classification. The key
# `RNNKeys.PROBABILITIES_KEY` has been replaced by
# `prediction_key.PredictionKey.PROBABILITIES`.
class RNNKeys(object):
FINAL_STATE_KEY = 'final_state'
LABELS_KEY = '__labels__'
SEQUENCE_LENGTH_KEY = 'sequence_length'
STATE_PREFIX = 'rnn_cell_state'
class PredictionType(object):
"""Enum-like values for the type of prediction that the model makes.
"""
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
_CELL_TYPES = {'basic_rnn': contrib_rnn.BasicRNNCell,
'lstm': contrib_rnn.LSTMCell,
'gru': contrib_rnn.GRUCell,}
def _get_single_cell(cell_type, num_units):
"""Constructs and return a single `RNNCell`.
Args:
cell_type: Either a string identifying the `RNNCell` type or a subclass of
`RNNCell`.
num_units: The number of units in the `RNNCell`.
Returns:
An initialized `RNNCell`.
Raises:
ValueError: `cell_type` is an invalid `RNNCell` name.
TypeError: `cell_type` is not a string or a subclass of `RNNCell`.
"""
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, contrib_rnn.RNNCell):
raise ValueError('The supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def construct_rnn_cell(num_units, cell_type='basic_rnn',
dropout_keep_probabilities=None):
"""Constructs cells, applies dropout and assembles a `MultiRNNCell`.
The cell type chosen by DynamicRNNEstimator.__init__() is the same as
returned by this function when called with the same arguments.
Args:
num_units: A single `int` or a list/tuple of `int`s. The size of the
`RNNCell`s.
cell_type: A string identifying the `RNNCell` type or a subclass of
`RNNCell`.
dropout_keep_probabilities: a list of dropout probabilities or `None`. If a
list is given, it must have length `len(cell_type) + 1`.
Returns:
An initialized `RNNCell`.
"""
if not isinstance(num_units, (list, tuple)):
num_units = (num_units,)
cells = [_get_single_cell(cell_type, n) for n in num_units]
if dropout_keep_probabilities:
cells = apply_dropout(cells, dropout_keep_probabilities)
if len(cells) == 1:
return cells[0]
return contrib_rnn.MultiRNNCell(cells)
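def _example_construct_rnn_cell():
  """Illustrative sketch (added for clarity; not part of the original module):
  builds a two-layer LSTM (128 and 64 units) with dropout applied between
  layers; the unit sizes and keep probabilities are arbitrary example values.
  """
  return construct_rnn_cell(num_units=(128, 64),
                            cell_type='lstm',
                            dropout_keep_probabilities=[1.0, 0.8, 1.0])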
def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
"""Applies dropout to the outputs and inputs of `cell`.
Args:
cells: A list of `RNNCell`s.
dropout_keep_probabilities: a list whose elements are either floats in
`[0.0, 1.0]` or `None`. It must have length one greater than `cells`.
random_seed: Seed for random dropout.
Returns:
A list of `RNNCell`s, the result of applying the supplied dropouts.
Raises:
ValueError: If `len(dropout_keep_probabilities) != len(cells) + 1`.
"""
if len(dropout_keep_probabilities) != len(cells) + 1:
raise ValueError(
'The number of dropout probabilities must be one greater than the '
'number of cells. Got {} cells and {} dropout probabilities.'.format(
len(cells), len(dropout_keep_probabilities)))
wrapped_cells = [
contrib_rnn.DropoutWrapper(cell, prob, 1.0, seed=random_seed)
for cell, prob in zip(cells[:-1], dropout_keep_probabilities[:-2])
]
wrapped_cells.append(
contrib_rnn.DropoutWrapper(cells[-1], dropout_keep_probabilities[-2],
dropout_keep_probabilities[-1]))
return wrapped_cells
def get_eval_metric_ops(problem_type, prediction_type, sequence_length,
prediction_dict, labels):
"""Returns eval metric ops for given `problem_type` and `prediction_type`.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
prediction_dict: A dict of prediction tensors.
labels: The label `Tensor`.
Returns:
A `dict` mapping strings to the result of calling the metric_fn.
"""
eval_metric_ops = {}
if problem_type == constants.ProblemType.CLASSIFICATION:
# Multi value classification
if prediction_type == PredictionType.MULTIPLE_VALUE:
mask_predictions, mask_labels = mask_activations_and_labels(
prediction_dict[prediction_key.PredictionKey.CLASSES], labels,
sequence_length)
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=mask_predictions, labels=mask_labels)
# Single value classification
elif prediction_type == PredictionType.SINGLE_VALUE:
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=prediction_dict[prediction_key.PredictionKey.CLASSES],
labels=labels)
elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
# Multi value regression
if prediction_type == PredictionType.MULTIPLE_VALUE:
pass
# Single value regression
elif prediction_type == PredictionType.SINGLE_VALUE:
pass
return eval_metric_ops
def select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
num_label_columns = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
reshaped_activations = array_ops.reshape(activations,
[-1, num_label_columns])
indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
last_activations = array_ops.gather(reshaped_activations, indices)
last_activations.set_shape(
[activations.get_shape()[0], activations.get_shape()[2]])
return last_activations
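def _example_select_last_activations():
  """Illustrative sketch (added for clarity; not part of the original module):
  for a batch of two padded sequences with lengths [3, 2], this selects
  activations[0, 2, :] and activations[1, 1, :] and stacks them into a
  [2, 5] tensor; the shapes are arbitrary example values.
  """
  activations = array_ops.zeros([2, 4, 5])  # [batch_size, padded_length, k]
  sequence_lengths = ops.convert_to_tensor([3, 2])
  return select_last_activations(activations, sequence_lengths)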
def mask_activations_and_labels(activations, labels, sequence_lengths):
"""Remove entries outside `sequence_lengths` and returned flattened results.
Args:
activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
labels: Label values, shape `[batch_size, padded_length]`.
sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
length of each sequence. If `None`, then each sequence is unpadded.
Returns:
activations_masked: `logit` values with those beyond `sequence_lengths`
removed for each batch. Batches are then concatenated. Shape
`[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
shape `[batch_size * padded_length, k]` otherwise.
labels_masked: Label values after removing unneeded entries. Shape
`[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and shape
`[batch_size * padded_length]` otherwise.
"""
with ops.name_scope(
'mask_activations_and_labels',
values=[activations, labels, sequence_lengths]):
labels_shape = array_ops.shape(labels)
batch_size = labels_shape[0]
padded_length = labels_shape[1]
if sequence_lengths is None:
flattened_dimension = padded_length * batch_size
activations_masked = array_ops.reshape(activations,
[flattened_dimension, -1])
labels_masked = array_ops.reshape(labels, [flattened_dimension])
else:
mask = array_ops.sequence_mask(sequence_lengths, padded_length)
activations_masked = array_ops.boolean_mask(activations, mask)
labels_masked = array_ops.boolean_mask(labels, mask)
return activations_masked, labels_masked
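def _example_mask_activations_and_labels():
  """Illustrative sketch (added for clarity; not part of the original module):
  with sequence lengths [3, 2], entries beyond each sequence length are
  dropped, so the returned activations have shape [5, 5] and the returned
  labels have shape [5]; the shapes are arbitrary example values.
  """
  activations = array_ops.zeros([2, 4, 5])
  labels = array_ops.zeros([2, 4])
  sequence_lengths = ops.convert_to_tensor([3, 2])
  return mask_activations_and_labels(activations, labels, sequence_lengths)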
def multi_value_predictions(activations, target_column, problem_type,
predict_probabilities):
"""Maps `activations` from the RNN to predictions for multi value models.
If `predict_probabilities` is `False`, this function returns a `dict`
containing single entry with key `prediction_key.PredictionKey.CLASSES` for
`problem_type` `ProblemType.CLASSIFICATION` or
`prediction_key.PredictionKey.SCORE` for `problem_type`
`ProblemType.LINEAR_REGRESSION`.
If `predict_probabilities` is `True`, it will contain a second entry with key
`prediction_key.PredictionKey.PROBABILITIES`. The
value of this entry is a `Tensor` of probabilities with shape
`[batch_size, padded_length, num_classes]`.
Note that variable length inputs will yield some predictions that don't have
meaning. For example, if `sequence_length = [3, 2]`, then prediction `[1, 2]`
has no meaningful interpretation.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
target_column: An initialized `TargetColumn`, calculate predictions.
problem_type: Either `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
predict_probabilities: A Python boolean, indicating whether probabilities
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
A `dict` mapping strings to `Tensors`.
"""
with ops.name_scope('MultiValuePrediction'):
activations_shape = array_ops.shape(activations)
flattened_activations = array_ops.reshape(activations,
[-1, activations_shape[2]])
prediction_dict = {}
if predict_probabilities:
flat_probabilities = target_column.logits_to_predictions(
flattened_activations, proba=True)
flat_predictions = math_ops.argmax(flat_probabilities, 1)
if target_column.num_label_columns == 1:
probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
else:
probability_shape = activations_shape
probabilities = array_ops.reshape(
flat_probabilities,
probability_shape,
name=prediction_key.PredictionKey.PROBABILITIES)
prediction_dict[
prediction_key.PredictionKey.PROBABILITIES] = probabilities
else:
flat_predictions = target_column.logits_to_predictions(
flattened_activations, proba=False)
predictions_name = (prediction_key.PredictionKey.CLASSES
if problem_type == constants.ProblemType.CLASSIFICATION
else prediction_key.PredictionKey.SCORES)
predictions = array_ops.reshape(
flat_predictions, [activations_shape[0], activations_shape[1]],
name=predictions_name)
prediction_dict[predictions_name] = predictions
return prediction_dict
| apache-2.0 | 4,202,721,969,067,886,000 | 40.957792 | 93 | 0.693337 | false |
code-sauce/tensorflow | tensorflow/python/kernel_tests/substr_op_test.py | 55 | 8327 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SubstrOpTest(test.TestCase):
def _testScalarString(self, dtype):
test_string = b"Hello"
position = np.array(1, dtype)
length = np.array(3, dtype)
expected_value = b"ell"
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testVectorStrings(self, dtype):
test_string = [b"Hello", b"World"]
position = np.array(1, dtype)
length = np.array(3, dtype)
expected_value = [b"ell", b"orl"]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testMatrixStrings(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array(1, dtype)
length = np.array(4, dtype)
expected_value = [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testElementWisePosLen(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4], [4, 3, 2], [5, 5, 5]], dtype)
expected_value = [[b"en", b"eve", b"lve"], [b"hirt", b"urt", b"te"],
[b"ixtee", b"vente", b"hteen"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testBroadcast(self, dtype):
# Broadcast pos/len onto input string
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"],
[b"nineteen", b"twenty", b"twentyone"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
[b"i", b"ve", b"hte"], [b"i", b"en", b"nty"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = [b"thirteen", b"fourteen", b"fifteen"]
position = np.array([[1, 2, 3], [3, 2, 1], [5, 5, 5]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
expected_value = [[b"hir", b"ur", b"t"], [b"r", b"ur", b"ift"],
[b"ee", b"ee", b"en"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Test 1D broadcast
test_string = b"thirteen"
position = np.array([1, 5, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = [b"hir", b"ee", b"n"]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testBadBroadcast(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, 3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
[b"i", b"ve", b"hte"]]
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
def _testOutOfRangeError(self, dtype):
# Scalar/Scalar
test_string = b"Hello"
position = np.array(7, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Vector/Scalar
test_string = [b"good", b"good", b"bad", b"good"]
position = np.array(3, dtype)
length = np.array(1, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Negative pos
test_string = b"Hello"
position = np.array(-1, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Matrix/Matrix
test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]]
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Broadcast
test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
def _testMismatchPosLenShapes(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([[1, 2, 3]], dtype)
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
    # Should fail: position/length have different dimensionality
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
def _testAll(self, dtype):
self._testScalarString(dtype)
self._testVectorStrings(dtype)
self._testMatrixStrings(dtype)
self._testElementWisePosLen(dtype)
self._testBroadcast(dtype)
self._testBadBroadcast(dtype)
self._testOutOfRangeError(dtype)
self._testMismatchPosLenShapes(dtype)
def testInt32(self):
self._testAll(np.int32)
def testInt64(self):
self._testAll(np.int64)
def testWrongDtype(self):
with self.test_session():
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3, 1.0)
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,049,360,549,842,419,200 | 37.730233 | 80 | 0.609223 | false |
marcoscaceres/bedrock | bedrock/base/tests/test_middleware.py | 28 | 2690 | from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from bedrock.base.middleware import LocaleURLMiddleware
@override_settings(DEV=True)
class TestLocaleURLMiddleware(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.middleware = LocaleURLMiddleware()
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_correct_language(self):
"""Should redirect to lang prefixed url."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/de' + path)
@override_settings(DEV_LANGUAGES=('es', 'fr'),
LANGUAGE_CODE='en-US',
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_default_language(self):
"""Should redirect to default lang if not in settings."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/en-US' + path)
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=('/other/',))
def test_redirects_lang_param(self):
"""Middleware should remove the lang param on redirect."""
path = '/fr/the/dude/'
req = self.rf.get(path, {'lang': 'de'})
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/de/the/dude/')
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=('/dude/',))
def test_no_redirect_lang_param(self):
"""Middleware should not redirect when exempt."""
path = '/fr/the/dude/'
req = self.rf.get(path, {'lang': 'de'})
resp = LocaleURLMiddleware().process_request(req)
self.assertIs(resp, None) # no redirect
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_correct_language_despite_unicode_errors(self):
"""Should redirect to lang prefixed url, stripping invalid chars."""
path = '/the/dude/'
corrupt_querystring = '?a\xa4\x91b\xa4\x91i\xc0de=s'
corrected_querystring = '?abide=s'
req = self.rf.get(path + corrupt_querystring,
HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'],
'/de' + path + corrected_querystring)
| mpl-2.0 | -9,013,458,169,412,726,000 | 43.098361 | 76 | 0.603717 | false |
laperry1/android_external_chromium_org | native_client_sdk/src/tools/tests/create_html_test.py | 108 | 1808 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import shutil
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock")
sys.path.append(PARENT_DIR)
sys.path.append(MOCK_DIR)
import create_html
import mock
class TestCreateHtml(unittest.TestCase):
def setUp(self):
self.tempdir = None
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def testBadInput(self):
    # Non-existent file
self.assertRaises(create_html.Error, create_html.main, ['foo.nexe'])
# Existing file with wrong extension
self.assertRaises(create_html.Error, create_html.main, [__file__])
# Existing directory
self.assertRaises(create_html.Error, create_html.main, [PARENT_DIR])
def testCreatesOutput(self):
self.tempdir = tempfile.mkdtemp("_sdktest")
expected_html = os.path.join(self.tempdir, 'foo.html')
nmf_file = os.path.join(self.tempdir, 'foo.nmf')
with mock.patch('sys.stdout'):
with mock.patch('os.path.exists'):
with mock.patch('os.path.isfile'):
options = mock.MagicMock(return_value=False)
options.output = None
create_html.CreateHTML([nmf_file], options)
# Assert that the file was created
self.assertTrue(os.path.exists(expected_html))
# Assert that nothing else was created
self.assertEqual(os.listdir(self.tempdir),
[os.path.basename(expected_html)])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -6,435,832,612,186,421,000 | 31.285714 | 74 | 0.693031 | false |