repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
Rotendahl/DormitoryLife | DormitoryDollars/DormitoryDollars/settings.py | 1 | 3717 | """
Django settings for DormitoryDollars project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0sz8*uob2odgdon!8l6uwv)k79m7s5i7vl22j3ta$6q+bl!d&a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'DormitoryDollars.cashier',
'bootstrap3',
'flat_responsive'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DormitoryDollars.DormitoryDollars.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DormitoryDollars.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
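# dj_database_url reads the DATABASE_URL environment variable (for example
# postgres://user:password@host:5432/dbname) and, when it is set, overrides
# the sqlite default defined above.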
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
#STATIC_URL = '/static/'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT + 'DormitoryDollars/', 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| gpl-3.0 | -2,950,938,890,999,165,400 | 25.934783 | 91 | 0.699489 | false |
ozgurturkiye/istihzapython | 24.Sayilar/sayilar.py | 1 | 2190 | #We know that there are three kinds of numbers in Python:
#1. Integers
#2. Floating point numbers (floats for short)
#3. Complex numbers
"""
Function     Purpose                                          Example
int()        Converts a value to an integer                   int('2')
float()      Converts a value to a floating point number      float(2)
complex()    Converts a value to a complex number             complex(2)
"""
#Methods of integers
#bit_length() tells us how many bits an integer occupies in memory
sayı = 10
sayı.bit_length() #output : 4
(10).bit_length() #output : 4  Note: if you don't put the number in parentheses you get an error, because the dot after 10 is read as the start of a float literal
#Methods of floating point numbers
#as_integer_ratio()
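(12.5).as_integer_ratio() #output: (25, 2) -- the float expressed as a ratio of two integers (added example)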
#is_integer() is used to check whether the decimal part of a floating point number contains anything other than 0
(12.0).is_integer() # output: True
#Methods of complex numbers
#imag The attribute named imag gives us the imaginary part of a complex number:
#real The attribute named real gives us the real part of a complex number:
c = 12+4j
c.imag #output: 4.0
c.real #output: 12.0
# These two (imag and real) are used without parentheses; if we write them with parentheses we get an error, because c.imag is a float value, not a callable method
## In fact real and imag are attributes, not methods, which is why there are no () parentheses. The difference between attributes
## and methods will be explained in later topics.
#Arithmetic functions
##Info: Built-in functions are functions in the Python programming language
###that we can use directly in our code without needing any special setup.
#abs() This function gives us the absolute value of a number:
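abs(-5) #output: 5 (added example)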
#divmod() This function gives the quotient and the remainder when one number is divided by another:
##As you can see from the result, the divmod() function actually does the same job as this code:
14 // 3, 14 % 3
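divmod(14, 3) #output: (4, 2) (added example)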
#max()
#min()
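max([10, 20, 43]) #output: 43 (added example)
min([10, 20, 43]) #output: 10 (added example)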
#sum() This function adds up all the numbers contained in a sequence.
a = [10, 20, 43, 45 , 77, 2, 0, 1]
print(sum(a)) #output: 198
| gpl-3.0 | -9,210,819,391,998,454,000 | 44.266667 | 118 | 0.73245 | false |
vasekhodina/blivet_gv_visualization | actionsProcessor.py | 1 | 2727 | import emoji
import pallete
import node
import gvInput
class ActionsProcessor():
""" Class containing neccessary methods for putting actions scheduled by blivet into graph"""
def __init__(self, actions_list, node_list, edge_list, path_to_pallete):
self.actions = actions_list
self.node_list = node_list
self.edge_list = edge_list
self.pallete = pallete.Pallete(path_to_pallete)
self.gv_input = gvInput.GvInput(node_list, edge_list, path_to_pallete)
def process_actions(self):
""" Main method for processing actions """
for action in self.actions:
found_node = self.find_node(action.device.name)
if not found_node:
found_node = node.Node(action.device.name, action.device.type, action.device.format, action.device.size, action.device.path)
self.gv_input.process_node(found_node,action.device)
self.process_action(action, found_node)
def process_action(self, action, node):
""" Helper function for processing actions, finds out what does each action do and sets the appropriate attributed of the node
:param obj action The action to be processed.
:param obj node The node to be changed."""
if action.isFormat:
node.add_emoji("Fmt:")
node.addAttribute("action", "Format")
else:
node.add_emoji("Dev:")
node.addAttribute("action", "Device")
if action.isDestroy or action.isRemove:
print("Adding action: Delete for node: " + node.getName())
node.addAttribute("action", "delete")
node.change_color(self.pallete.complement["2"])
node.add_emoji(emoji.emojize(":fire:"))
if action.isCreate or action.isAdd:
print("Adding action: Add for node: " + node.getName())
node.addAttribute("action", "add")
node.change_color(self.pallete.primary["2"])
node.add_emoji(emoji.emojize(":building_construction:"))
if action.isResize or action.isShrink or action.isGrow:
print("Adding action: Resize for node: " + node.getName())
node.addAttribute("action", "resize")
node.change_color(self.pallete.secondary_second["2"])
node.add_emoji(emoji.emojize(":wrench:"))
def find_node(self, dev_name):
""" Helper function that searches node_list for a node using it's name
:param str dev_name The name of node / device that should be looked up."""
for found_node in self.node_list:
if found_node.getName() == dev_name:
print("Found node: " + found_node.getName())
return found_node
| gpl-2.0 | -2,745,088,409,339,983,400 | 47.696429 | 140 | 0.623396 | false |
mduggan/toumeika | shikin/pdf/tmpdir.py | 1 | 3164 | """
A backport of the Python 3.2 TemporaryDirectory object.
From:
https://stackoverflow.com/questions/19296146/tempfile-temporarydirectory-context-manager-in-python-2-7
"""
from __future__ import print_function
import warnings as _warnings
import os as _os
import sys as _sys
from tempfile import mkdtemp
class ResourceWarning(Exception):
pass
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix="tmp", dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except OSError:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except OSError:
pass
try:
self._rmdir(path)
except OSError:
pass
| bsd-2-clause | -8,551,097,888,931,761,000 | 30.64 | 102 | 0.578698 | false |
tailhook/aio-hs2 | thrift/TTornado.py | 1 | 5558 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import StringIO, BytesIO
import logging
import socket
import struct
from thrift.transport import TTransport
from thrift.transport.TTransport import TTransportException
from tornado import gen
from tornado import iostream
from tornado import tcpserver
class TTornadoStreamTransport(TTransport.TTransportBase):
"""a framed, buffered transport over a Tornado stream"""
def __init__(self, host, port, stream=None):
self.host = host
self.port = port
self.is_queuing_reads = False
self.read_queue = []
self.__wbuf = BytesIO()
# servers provide a ready-to-go stream
self.stream = stream
if self.stream is not None:
self._set_close_callback()
# not the same number of parameters as TTransportBase.open
def open(self, callback):
logging.debug('socket connecting')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.stream = iostream.IOStream(sock)
def on_close_in_connect(*_):
message = 'could not connect to {}:{}'.format(self.host, self.port)
raise TTransportException(
type=TTransportException.NOT_OPEN,
message=message)
self.stream.set_close_callback(on_close_in_connect)
def finish(*_):
self._set_close_callback()
callback()
self.stream.connect((self.host, self.port), callback=finish)
def _set_close_callback(self):
def on_close():
raise TTransportException(
type=TTransportException.END_OF_FILE,
message='socket closed')
self.stream.set_close_callback(self.close)
def close(self):
# don't raise if we intend to close
self.stream.set_close_callback(None)
self.stream.close()
def read(self, _):
# The generated code for Tornado shouldn't do individual reads -- only
# frames at a time
assert "you're doing it wrong" is True
@gen.engine
def readFrame(self, callback):
self.read_queue.append(callback)
logging.debug('read queue: %s', self.read_queue)
if self.is_queuing_reads:
# If a read is already in flight, then the while loop below should
# pull it from self.read_queue
return
self.is_queuing_reads = True
while self.read_queue:
next_callback = self.read_queue.pop()
result = yield gen.Task(self._readFrameFromStream)
next_callback(result)
self.is_queuing_reads = False
@gen.engine
def _readFrameFromStream(self, callback):
logging.debug('_readFrameFromStream')
frame_header = yield gen.Task(self.stream.read_bytes, 4)
frame_length, = struct.unpack('!i', frame_header)
logging.debug('received frame header, frame length = %i', frame_length)
frame = yield gen.Task(self.stream.read_bytes, frame_length)
logging.debug('received frame payload')
callback(frame)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self, callback=None):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = BytesIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = struct.pack("!i", wsz) + wout
logging.debug('writing frame length = %i', wsz)
self.stream.write(buf, callback)
class TTornadoServer(tcpserver.TCPServer):
def __init__(self, processor, iprot_factory, oprot_factory=None,
*args, **kwargs):
super(TTornadoServer, self).__init__(*args, **kwargs)
self._processor = processor
self._iprot_factory = iprot_factory
self._oprot_factory = (oprot_factory if oprot_factory is not None
else iprot_factory)
def handle_stream(self, stream, address):
try:
host, port = address
trans = TTornadoStreamTransport(host=host, port=port, stream=stream)
oprot = self._oprot_factory.getProtocol(trans)
def next_pass():
if not trans.stream.closed():
self._processor.process(trans, self._iprot_factory, oprot,
callback=next_pass)
next_pass()
except Exception:
logging.exception('thrift exception in handle_stream')
trans.close()
| apache-2.0 | 938,367,796,010,596,400 | 35.565789 | 80 | 0.638719 | false |
iksaif/lugdulov | python/cyclocity.py | 1 | 11439 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cyclocity.py
Copyright (C) 2010 Patrick Installé <[email protected]>
Copyright (C) 2010-2011 Corentin Chary <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
JCDecaux CycloCity
"""
import sys
import os
import re
import xml.dom.minidom
import datetime
from plugin import *
class CycloCity(Provider):
config = [
{
'city_uid' : 'cergy',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Cergy-Pontoise',
'bike_name' : 'Velo2',
'server' : 'www.velo2.cergypontoise.fr',
'lat': 49.0485219,
'lng': 2.0342372
},
{
'city_uid' : 'paris',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Paris',
'bike_name' : u'Vélib\'',
'server' : 'www.velib.paris.fr',
'lat': 48.8566667,
'lng': 2.3509871
},
{
'city_uid' : 'creteil',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : u'Créteil',
'bike_name' : 'Cristolib',
'server' : 'www.cristolib.fr',
'lat': 48.7896130,
'lng': 2.4526276
},
{
'city_uid' : 'amiens',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Amiens',
'bike_name' : 'Velam',
'server' : 'www.velam.amiens.fr',
'lat': 49.8939183,
'lng': 2.2942436
},
{
'city_uid' : 'besancon',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : u'Besançon',
'bike_name' : u'VéloCité',
'server' : 'www.velocite.besancon.fr',
'lat': 47.2412254,
'lng': 6.0255656
},
{
'city_uid' : 'marseille',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Marseille',
'bike_name' : u'Le Vélo',
'server' : 'www.levelo-mpm.fr',
'lat': 43.2976116,
'lng': 5.3810421
},
{
'city_uid' : 'mulhouse',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Mulhouse',
'bike_name' : u'VéloCité',
'server' : 'www.velocite.mulhouse.fr',
'lat': 47.7494919,
'lng': 7.3397806
},
{
'city_uid' : 'nancy',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Nancy',
'bike_name' : u'vélOStan',
'server' : 'www.velostanlib.fr',
'lat': 48.6907887,
'lng': 6.1825044
},
{
'city_uid' : 'nantes',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Nantes',
'bike_name' : 'Bicloo',
'server' : 'www.bicloo.nantesmetropole.fr',
'lat': 47.2168424,
'lng': -1.5567445
},
{
'city_uid' : 'rouen',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Rouen',
'bike_name' : 'Cyclic',
'server' : 'cyclic.rouen.fr',
'lat': 49.4423668,
'lng': 1.0984924
},
{
'city_uid' : 'toulouse',
'country_uid' : 'fr',
'country_Name' : 'France',
'city_Name' : 'Toulouse',
'bike_name' : u'Vélouse',
'server' : 'www.velo.toulouse.fr',
'lat': 43.6043630,
'lng': 1.4429513
},
{
'city_uid' : 'bruxelles',
'country_uid' : 'be',
'country_Name' : 'Belgium',
'city_Name' : 'Bruxelles',
'bike_name' : 'Villo',
'server' : 'www.villo.be',
'lat': 50.8462807,
'lng': 4.3547273
},
{
'city_uid' : 'dublin',
'country_uid' : 'ir',
'country_Name' : 'Ireland',
'city_Name' : 'Dublin',
'bike_name' : 'Dubline Bikes',
'server' : 'www.dublinbikes.ie',
'lat': 53.3441040,
'lng': -6.2674937
},
{
'city_uid' : 'luxembourg',
'country_uid' : 'lu',
'country_Name' : 'Luxembourg',
'city_Name' : 'Luxembourg',
'bike_name' : 'Veloh',
'server' : 'www.veloh.lu',
'lat': 49.6100036,
'lng': 6.1295960
},
{
'city_uid' : 'valence',
'country_uid' : 'es',
'country_Name' : 'Spain',
'city_Name' : 'Valencia',
'bike_name' : 'Valenbisi',
'lat' : 39.459258,
'lng' : -0.376453,
'server' : 'www.valenbisi.es'
},
{
'city_uid' : 'santander',
'country_uid' : 'es',
'country_Name' : 'Spain',
'city_Name' : 'Santander',
'bike_name' : 'Tusbic',
'server' : 'www.tusbic.es',
'lat': 43.4609602,
'lng': -3.8079336
},
{
'city_uid' : 'seville',
'country_uid' : 'es',
'country_Name' : 'Spain',
'city_Name' : u'Séville',
'bike_name' : 'Sevici',
'server' : 'www.sevici.es',
'lat': 37.3826400,
'lng': -5.9962951
},
{
'city_uid' : 'toyama',
'country_uid' : 'jp',
'country_Name' : 'Japan',
'city_Name' : 'Toyama',
'bike_name' : 'Cyclocity',
'server' : 'www.cyclocity.jp',
'lat': 36.6959518,
'lng': 137.2136768
},
{
'city_uid' : 'brisbane',
'country_uid' : 'au',
'country_Name' : 'Australia',
'city_Name' : 'Brisbane',
'bike_name' : 'CycloCycle',
'server' : 'www.citycycle.com.au',
'lat': -27.47538,
'lng': 153.019466
},
{
'city_uid' : 'ljubljana',
'country_uid' : 'si',
'country_Name' : 'Slovenia',
'city_Name' : 'Ljubljana',
'bike_name' : 'Bicike(lj)',
'server' : 'www.bicikelj.si',
'lat': 46.055556,
'lng': 14.508333
},
]
cache = {}
def service_by_country(self, country):
for service in self.config:
if country.uid == service['country_uid']:
return service
return None
def service_by_city(self, city):
for service in self.config:
if city.uid == service['city_uid']:
return service
return None
def get_countries(self):
ret = []
done = {}
for service in self.config:
if service['country_uid'] in done:
continue
done[service['country_uid']] = True
country = Country()
country.uid = service['country_uid']
country.name = service['country_Name']
ret.append(country)
return ret
def get_cities(self, country):
ret = []
for service in self.config:
if country.uid != service['country_uid']:
continue
city = City()
city.uid = service['city_uid']
city.id = city.uid
city.name = service['city_Name']
city.bikeName = service['bike_name']
city.lat = service['lat']
city.lng = service['lng']
city.create_rect()
city.type = "CycloCity"
city.infos = 'http://' + service['server'] + '/service/carto'
city.status = 'http://' + service['server'] + '/service/stationdetails/' + city.uid + '/%1'
#city.rect = self.get_city_bike_zone(service, city)
ret.append(city)
return ret
def get_carto(self, service, city):
if service['server'] in self.cache:
return self.cache[service['server']]
url = city.infos
fp = urlopen(url)
data = fp.read()
dom = xml.dom.minidom.parseString(data)
self.cache[service['server']] = dom
return dom
def get_stations(self, city):
stations = None
service = self.service_by_city(city)
dom = self.get_carto(service, city)
stations = []
markers = dom.getElementsByTagName("marker")
for marker in markers:
station = Station()
station.name = marker.getAttribute('name')
station.uid = marker.getAttribute('number')
station.id = station.uid
station.description = marker.getAttribute('address')
# marker.getAttribute('fullAddress')
station.zone = marker.getAttribute('arrondissement')
station.lat = float(marker.getAttribute('lat'))
station.lng = float(marker.getAttribute('lng'))
if city.contains((station.lat, station.lng)):
stations.append(station)
return stations
def get_status(self, station, city):
service = self.service_by_city(city)
url = city.status.replace('%1', station.id)
fp = urlopen(url)
data = fp.read()
dom = xml.dom.minidom.parseString(data)
node = dom.getElementsByTagName("station")[0]
status = {}
for elem in ["available", "free", "total", "ticket"]:
status[elem] = int(node.getElementsByTagName(elem)[0].childNodes[0].data)
station.ticket = status['ticket']
station.bikes = status['available']
station.slots = status['free']
return station
def dump_city(self, city):
service = self.service_by_city(city)
city.rect = self.get_city_bike_zone(service, city)
data = self._dump_city(city)
print data
def dump_stations(self, city):
service = self.service_by_city(city)
city.rect = self.get_city_bike_zone(service, city)
data = self._dump_stations(city)
print data.encode('utf8')
def test():
prov = CycloCity()
prov.selftest()
def main():
test()
if __name__ == '__main__':
main()
| gpl-2.0 | -1,974,099,777,166,409,200 | 29.96748 | 104 | 0.465914 | false |
suutari-ai/shoop | shuup_tests/browser/front/test_search_view.py | 3 | 5711 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
from django.core.urlresolvers import reverse
from django.utils.translation import activate
from shuup.core import cache
from shuup.core.models import (
Category, CategoryStatus, Manufacturer, Product, ProductMode,
ProductVariationVariable, ProductVariationVariableValue, ShopProduct
)
from shuup.front.utils.sorts_and_filters import (
set_configuration
)
from shuup.testing.browser_utils import (
click_element, move_to_element, wait_until_condition,
wait_until_disappeared
)
from shuup.testing.factories import (
create_product, get_default_category, get_default_shop,
get_default_supplier
)
from shuup.testing.utils import initialize_front_browser_test
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
PRODUCT_DATA = [
("Test Product", "sku-1", 123),
("A Test Product", "sku-2", 720),
("XTest Product", "sku-3", 1),
("Test", "sku-4", 42),
("Product", "sku-5", 434),
("Xtest", "sku-6", 3),
("A", "sku-7", 99),
("xtest", "sku-8", 999),
("a", "sku-9", 42),
("test", "sku-10", 53),
("product", "sku-11", 34),
]
def create_orderable_product(name, sku, price):
supplier = get_default_supplier()
shop = get_default_shop()
product = create_product(sku=sku, shop=shop, supplier=supplier, default_price=price, name=name)
return product
@pytest.mark.browser
@pytest.mark.django_db
def test_search_product_list(browser, live_server, settings):
activate("en")
# initialize
cache.clear()
shop = get_default_shop()
for name, sku, price in PRODUCT_DATA:
create_orderable_product(name, sku, price=price)
# initialize test and go to front page
browser = initialize_front_browser_test(browser, live_server)
# check that front page actually loaded
wait_until_condition(browser, lambda x: x.is_text_present("Welcome to Default!"))
url = reverse("shuup:product_search")
browser.visit("%s%s?q=test product" % (live_server, url))
wait_until_condition(browser, lambda x: len(x.find_by_css(".product-card")) == 9)
check_default_ordering(browser)
# basic_sorting_test(browser)
second_test_query(browser, live_server, url)
def check_default_ordering(browser):
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-1").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
expected_second_prod_id = "product-%s" % Product.objects.filter(sku="sku-3").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card")[1]["id"] == expected_second_prod_id)
expected_third_prod_id = "product-%s" % Product.objects.filter(sku="sku-2").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card")[2]["id"] == expected_third_prod_id)
def basic_sorting_test(browser):
# Sort from Z to A
click_element(browser, "button[data-id='id_sort']")
click_element(browser, "li[data-original-index='1'] a")
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-3").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
# Sort by price (highest first)
click_element(browser, "button[data-id='id_sort']")
click_element(browser, "li[data-original-index='3'] a")
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-8").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
# Sort by price (lowest first)
click_element(browser, "button[data-id='id_sort']")
click_element(browser, "li[data-original-index='2'] a")
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-3").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
# Sort from A to Z
click_element(browser, "button[data-id='id_sort']")
click_element(browser, "li[data-original-index='0'] a")
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-2").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
def second_test_query(browser, live_server, url):
browser.visit("%s%s?q=Test" % (live_server, url))
wait_until_condition(browser, lambda x: len(x.find_by_css(".product-card")) == 7)
expected_first_prod_id = "product-%s" % Product.objects.filter(sku="sku-4").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").first["id"] == expected_first_prod_id)
expected_second_prod_id = "product-%s" % Product.objects.filter(sku="sku-10").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card")[1]["id"] == expected_second_prod_id)
expected_third_prod_id = "product-%s" % Product.objects.filter(sku="sku-8").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card")[2]["id"] == expected_third_prod_id)
expected_last_prod_id = "product-%s" % Product.objects.filter(sku="sku-2").first().id
wait_until_condition(
browser, lambda x: x.find_by_css(".product-card").last["id"] == expected_last_prod_id)
| agpl-3.0 | 7,045,612,738,670,257,000 | 38.116438 | 114 | 0.667484 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/skia/infra/bots/assets/win_toolchain/create.py | 2 | 4349 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download an updated VS toolchain"""
import argparse
import common
import json
import os
import shlex
import shutil
import subprocess
import sys
import utils
import win_toolchain_utils
# By default the toolchain includes a bunch of unnecessary stuff with long path
# names. Trim out directories with these names.
IGNORE_LIST = [
'WindowsMobile',
'App Certification Kit',
'Debuggers',
'Extension SDKs',
'winrt',
'DesignTime',
'AccChecker',
]
REPO_CHROME = 'https://chromium.googlesource.com/chromium/src.git'
def filter_toolchain_files(dirname, files):
"""Callback for shutil.copytree. Return lists of files to skip."""
split = dirname.split(os.path.sep)
for ign in IGNORE_LIST:
if ign in split:
print 'Ignoring dir %s' % dirname
return files
return []
def get_toolchain_dir(toolchain_dir_output):
"""Find the toolchain directory."""
prefix = 'vs_path = '
for line in toolchain_dir_output.splitlines():
if line.startswith(prefix):
return line[len(prefix):].strip('"')
raise Exception('Unable to find toolchain dir in output:\n%s' % (
toolchain_dir_output))
def gen_toolchain(chrome_path, msvs_version, target_dir):
"""Update the VS toolchain and copy it to the target_dir."""
with utils.chdir(os.path.join(chrome_path, 'src')):
subprocess.check_call([utils.GCLIENT, 'sync'])
depot_tools = subprocess.check_output([
'python', os.path.join('build', 'find_depot_tools.py')]).rstrip()
with utils.git_branch():
vs_toolchain_py = os.path.join('build', 'vs_toolchain.py')
env = os.environ.copy()
env['GYP_MSVS_VERSION'] = msvs_version
subprocess.check_call(['python', vs_toolchain_py, 'update'], env=env)
output = subprocess.check_output(['python', vs_toolchain_py,
'get_toolchain_dir'], env=env).rstrip()
src_dir = get_toolchain_dir(output)
# Mock out absolute paths in win_toolchain.json.
win_toolchain_utils.abstract(os.path.join('build', 'win_toolchain.json'),
os.path.dirname(depot_tools))
# Copy the toolchain files to the target_dir.
build = os.path.join(os.getcwd(), 'build')
dst_build = os.path.join(target_dir, 'src', 'build')
os.makedirs(dst_build)
for f in ('find_depot_tools.py', 'vs_toolchain.py', 'win_toolchain.json'):
shutil.copyfile(os.path.join(build, f), os.path.join(dst_build, f))
shutil.copytree(os.path.join(os.getcwd(), 'tools', 'gyp', 'pylib'),
os.path.join(target_dir, 'src', 'tools', 'gyp', 'pylib'))
dst_depot_tools = os.path.join(target_dir, 'depot_tools')
os.makedirs(dst_depot_tools)
for f in ('gclient.py', 'breakpad.py'):
shutil.copyfile(os.path.join(depot_tools, f),
os.path.join(dst_depot_tools, f))
toolchain_dst = os.path.join(
target_dir, 'depot_tools', os.path.relpath(src_dir, depot_tools))
shutil.copytree(src_dir, toolchain_dst, ignore=filter_toolchain_files)
def create_asset(target_dir, msvs_version, chrome_path=None):
"""Create the asset."""
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
with utils.tmp_dir() as tmp_dir:
if not chrome_path:
print ('Syncing Chrome from scratch. If you already have a checkout, '
'specify --chrome_path to save time.')
chrome_path = os.path.join(tmp_dir.name, 'src')
if not os.path.isdir(chrome_path):
subprocess.check_call([utils.GCLIENT, 'config', REPO_CHROME, '--managed'])
subprocess.check_call([utils.GCLIENT, 'sync'])
gen_toolchain(chrome_path, msvs_version, target_dir)
def main():
if sys.platform != 'win32':
print >> sys.stderr, 'This script only runs on Windows.'
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument('--msvs_version', required=True)
parser.add_argument('--chrome_path')
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
target_dir = os.path.abspath(args.target_dir)
create_asset(target_dir, args.msvs_version, args.chrome_path)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,715,652,742,450,703,400 | 32.976563 | 80 | 0.650954 | false |
jpirko/lnst | lnst/Common/LoggingHandler.py | 1 | 2216 | """
Custom logging handlers we use.
LogBuffer
Handler used solely for temporarily storing messages so that they can be
retrieved later.
Copyright 2012 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
[email protected] (Ondrej Lichtner)
"""
import pickle
import logging
import xmlrpc.client
from lnst.Common.ConnectionHandler import send_data
class LogBuffer(logging.Handler):
"""
Handler used for buffering log messages. Compared to the BufferingHandler
defined in Python it doesn't have a capacity. It is intended to be used
solely as a temporary storage of logged messages so that they can be later
retrieved.
"""
def __init__(self):
logging.Handler.__init__(self)
self.buffer = []
def makePickle(self, record):
"""
Pickles the record so that it can be sent over the xmlrpc we use.
"""
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
s = pickle.dumps(d, 1)
return xmlrpc.client.Binary(s)
def add_buffer(self, buf):
for i in buf:
self.buffer.append(i)
def emit(self, record):
s = self.makePickle(record)
self.buffer.append(s)
def flush(self):
self.acquire()
buf = list(self.buffer)
self.buffer = []
self.release()
return buf
def close(self):
self.flush()
logging.Handler.close(self)
class TransmitHandler(logging.Handler):
def __init__(self, target):
logging.Handler.__init__(self)
self.target = target
self._origin_name = None
def set_origin_name(self, name):
self._origin_name = name
def emit(self, record):
r = dict(record.__dict__)
r['msg'] = record.getMessage()
r['args'] = None
r['exc_info'] = None
if self._origin_name != None:
r['origin_name'] = self._origin_name
data = {"type": "log", "record": r}
send_data(self.target, data)
def close(self):
logging.Handler.close(self)
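# --- Illustrative usage sketch (added; not part of the original LNST module) ---
# Buffer a record with LogBuffer, then retrieve and unpickle the stored payloads.
# The logger name "lnst.demo" is arbitrary.
if __name__ == "__main__":
    demo_logger = logging.getLogger("lnst.demo")
    demo_logger.setLevel(logging.DEBUG)
    buffer_handler = LogBuffer()
    demo_logger.addHandler(buffer_handler)
    demo_logger.info("buffered message")
    for binary_record in buffer_handler.flush():
        record_dict = pickle.loads(binary_record.data)
        print(record_dict["msg"])  # -> buffered message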
| gpl-2.0 | 2,165,201,360,103,899,000 | 24.471264 | 78 | 0.607852 | false |
SmartElect/SmartElect | rollgen/tests/test_generate_pdf_ed.py | 1 | 25258 | # 3rd party imports
from bidi.algorithm import get_display as apply_bidi
# Django imports
from django.conf import settings
# Project imports
from .factories import create_voters, generate_arabic_place_name
from .base import TestGeneratePdfBase
from .utils_for_tests import NBSP, extract_pdf_page, extract_textlines, clean_textlines, \
unwrap_lines
from ..arabic_reshaper import reshape
from ..generate_pdf_ed import generate_pdf_station_sign, generate_pdf_station_book, \
generate_pdf_center_list, station_name_range
from ..utils import truncate_center_name, format_name
from libya_elections.constants import ARABIC_COMMA, MALE, FEMALE, UNISEX
from register.tests.factories import RegistrationCenterFactory
def format_station_name_range_lines(lines):
"""Given a list of lines from station_name_range(), format them as expected output"""
# station_name_range() returns a list of lists; the inner lists each have 3 items and
# consist of [voter name, voter number, 'first' or 'last' (in Arabic)].
formatted = []
for line in lines:
if line:
name, number, first_or_last = line
formatted.append(apply_bidi(name) + str(number) + apply_bidi(first_or_last))
return formatted
class TestGeneratePdfEdPageCounts(TestGeneratePdfBase):
"""Exercises generate_pdf_ed.py with regard to page counts"""
def test_generate_pdf_station_book_unisex(self):
"""test generating a station book for a unisex station"""
# The # of males and females must be < UNISEX_TRIGGER and also not an even multiple
# of settings.ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_BOOK (to trigger full test coverage).
n_males = n_females = settings.ROLLGEN_UNISEX_TRIGGER - 1
male = create_voters(n_males, MALE)
female = create_voters(n_females, FEMALE)
voter_roll = male + female
station = self.run_station_distributor(voter_roll, 1)[0]
n_pages = generate_pdf_station_book(self.filename, station)
self.assertFileExists(self.filename)
# 5 pages = cover + 2 page males + 2 page females
self.assertEqual(n_pages, 5)
def test_generate_pdf_center_list_single_gender(self):
"""test generating a center list for a single gender station"""
voter_roll = create_voters(10, MALE)
stations = self.run_station_distributor(voter_roll, 1)
n_pages = generate_pdf_center_list(self.filename, stations, MALE)
self.assertFileExists(self.filename)
# 2 pages = cover + 1 page males
self.assertEqual(n_pages, 2)
def test_generate_pdf_center_list_unisex(self):
"""test generating a center list for a unisex station"""
n_voters = (settings.ROLLGEN_UNISEX_TRIGGER - 1) * 2
voter_roll = create_voters(n_voters)
stations = self.run_station_distributor(voter_roll, 1)
n_pages = generate_pdf_center_list(self.filename, stations, UNISEX)
self.assertFileExists(self.filename)
# 5 pages = cover + 2 page males + 2 page females
self.assertEqual(n_pages, 5)
def test_generate_pdf_center_list_multiple_genders(self):
"""test generating a center list w/male & female stations"""
n_voters = (settings.ROLLGEN_UNISEX_TRIGGER + 1) * 2
voter_roll = create_voters(n_voters)
stations = self.run_station_distributor(voter_roll, 2)
n_pages = generate_pdf_center_list(self.filename, stations, MALE)
self.assertFileExists(self.filename)
# 3 pages = cover + 2 pages males
self.assertEqual(n_pages, 3)
n_pages = generate_pdf_center_list(self.filename, stations, FEMALE)
self.assertFileExists(self.filename)
# 3 pages = cover + 2 page females
self.assertEqual(n_pages, 3)
def test_generate_pdf_center_list_multiple_genders_multiple_stations(self):
"""test generating a center list w/multiple male & female stations
This runs a code path that's not run when there's only one station of each gender.
"""
# Ensure enough voters of each gender to spill over into a second station.
n_males = n_females = settings.ROLLGEN_REGISTRANTS_PER_STATION_MAX + 1
n_voters = n_males + n_females
voter_roll = create_voters(n_voters)
stations = self.run_station_distributor(voter_roll, 4)
# Check males
n_pages_actual = generate_pdf_center_list(self.filename, stations, MALE)
self.assertFileExists(self.filename)
# Roughly, n_pages_expected = cover + n_males / ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_LIST
# However there's a page break at the end of each station so in the middle of the PDF where
# it transitions from one station to another there will be one page with <
# ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_LIST voters on it, unless the number of voters in
# that station happens to be an exact multiple of
# ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_LIST.
n_pages_expected = 39
self.assertEqual(n_pages_expected, n_pages_actual)
# check females
n_pages_actual = generate_pdf_center_list(self.filename, stations, FEMALE)
self.assertFileExists(self.filename)
self.assertEqual(n_pages_expected, n_pages_actual)
class TestGeneratePdfEdContent(TestGeneratePdfBase):
"""Compare the actual word-by-word content of the PDF with expected content."""
def setUp(self):
super(TestGeneratePdfEdContent, self).setUp()
# Create a PDF that will spill to multiple pages.
self.n_voters = settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION + 1
self.voter_roll = create_voters(self.n_voters, FEMALE)
def test_sign_content(self):
"""Exercises generate_pdf_station_sign"""
station = self.run_station_distributor(self.voter_roll, 1)[0]
# Build a list of the lines I expect to see.
expected_lines = []
expected_lines.append(self.STRINGS['ed_polling_sign_header'])
mf_string = self.STRINGS['female']
# These are constructed "backwards" relative to how the actual code does it. It's
# necessary to do so because the text is laid out RtoL in the PDF.
center_name = apply_bidi(reshape(self.center.name))
expected_lines.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
expected_lines.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
copied_by = self.center.copied_by.all()
if self.center.copy_of:
expected_lines.append('{} :{}'.format(self.center.copy_of.center_id,
self.STRINGS['copy_of']))
elif copied_by:
copied_by = [center.center_id for center in copied_by]
copied_by = (' ' + ARABIC_COMMA).join(map(str, reversed(copied_by)))
expected_lines.append('{} :{}'.format(copied_by, self.STRINGS['copied_by_plural']))
expected_lines.append('{} {}{} {}'.format(str(station.number), NBSP, NBSP,
self.STRINGS['station_number']))
expected_lines.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
expected_lines.append(self.STRINGS['names_range'])
lines = station_name_range(station)
expected_lines += format_station_name_range_lines(lines)
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_station_sign(self.filename, station)
self.assertEqual(n_pages, 1)
# Now see what was actually in the PDF and compare to expected.
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
# Did center name wrap? If so, unwrap.
if expected_lines[1].startswith(actual_lines[2]):
actual_lines = unwrap_lines(actual_lines, 1)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_sign_content_unisex(self):
"""Exercises generate_pdf_station_sign() with a unisex voter roll.
This differs from male/female in that the first/last voter station names are more
complicated, and long center names must be truncated.
"""
# Create a center with a name that will cause problems if it isn't truncated.
name_length = settings.ROLLGEN_CENTER_NAME_TRUNCATE_AFTER + 25
center = RegistrationCenterFactory(name=generate_arabic_place_name(name_length))
n_voters = (settings.ROLLGEN_UNISEX_TRIGGER - 1) * 2
voter_roll = create_voters(n_voters)
males = [voter for voter in voter_roll if voter.gender == MALE]
females = [voter for voter in voter_roll if voter.gender == FEMALE]
voter_roll = males + females
station = self.run_station_distributor(voter_roll, 1)[0]
station.center = center
# Build a list of the lines I expect to see.
expected_lines = []
expected_lines.append(self.STRINGS['ed_polling_sign_header'])
# These are constructed "backwards" relative to how the actual code does it. It's
# necessary to do so because the text is laid out RtoL in the PDF.
center_name = reshape(center.name)
# Because gender is unisex, we have to truncate the center name
center_name = apply_bidi(truncate_center_name(center_name))
expected_lines.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
expected_lines.append('{} :{}'.format(center.center_id, self.STRINGS['center_number']))
expected_lines.append('{} {}{} {}'.format(str(station.number), NBSP, NBSP,
self.STRINGS['station_number']))
expected_lines.append('{} :{}'.format(self.STRINGS['unisex'], self.STRINGS['gender']))
expected_lines.append(self.STRINGS['names_range'])
lines = station_name_range(station)
expected_lines += format_station_name_range_lines(lines)
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_station_sign(self.filename, station)
self.assertEqual(n_pages, 1)
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_station_book_content_for_cover(self):
"""Exercises generate_pdf_station_book and checks cover content"""
station = self.run_station_distributor(self.voter_roll, 1)[0]
# Build a list of the lines I expect to see.
expected_lines = []
# The string ed_station_book_cover is a multiline string so it is stored in self.STRINGS
# as a list of strings rather than just a simple string.
expected_lines += self.STRINGS['ed_station_book_cover']
mf_string = self.STRINGS['female']
expected_lines.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
expected_lines.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
expected_lines.append('{} :{}'.format(apply_bidi(reshape(self.center.name)),
self.STRINGS['center_name']))
copied_by = self.center.copied_by.all()
if self.center.copy_of:
expected_lines.append('{} :{}'.format(self.center.copy_of.center_id,
self.STRINGS['copy_of']))
elif copied_by:
copied_by = [center.center_id for center in copied_by]
copied_by = (' ' + ARABIC_COMMA).join(map(str, reversed(copied_by)))
expected_lines.append('{} :{}'.format(copied_by, self.STRINGS['copied_by_plural']))
subconstituency_id = self.center.subconstituency.id
subconstituency_name = reshape(self.center.subconstituency.name_arabic)
subconstituency_name = apply_bidi(subconstituency_name)
subconstituency = '{} / {} :{}'.format(subconstituency_name, subconstituency_id,
self.STRINGS['subconstituency_name'])
expected_lines.append(subconstituency)
expected_lines.append("{} {}{} {}".format(station.number, NBSP, NBSP,
self.STRINGS['station_number']))
expected_lines.append(self.STRINGS['names_range'])
lines = station_name_range(station)
expected_lines += format_station_name_range_lines(lines)
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_station_book(self.filename, station)
self.assertEqual(n_pages, 3)
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
# Did center name wrap? If so, unwrap.
if expected_lines[4].startswith(actual_lines[5]):
actual_lines = unwrap_lines(actual_lines, 4)
has_copy_info = (self.center.copy_of or self.center.copied_by)
if has_copy_info:
# Did copied by wrap? If so, unwrap.
if expected_lines[5].startswith(actual_lines[6]):
actual_lines = unwrap_lines(actual_lines, 5)
# Did subcon name wrap? If so, unwrap.
offset = 1 if has_copy_info else 0
if len(actual_lines) >= 7 + offset:
if expected_lines[5 + offset].startswith(actual_lines[6 + offset]):
actual_lines = unwrap_lines(actual_lines, 5 + offset)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_station_book_content_for_inner_pages(self):
"""Exercises generate_pdf_station_book and checks content of non-cover pages"""
station = self.run_station_distributor(self.voter_roll, 1)[0]
# Build a list of the lines I expect to see.
expected_lines = []
page_header = []
# Top header
page_header.append(self.STRINGS['ed_list_header_prefix'])
page_header.append(self.STRINGS['ed_station_book_header'])
# Top right items
mf_string = self.STRINGS['female']
page_header.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
page_header.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(truncate_center_name(reshape(self.center.name)))
page_header.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
# Header just above table that contains voter names
page_header.append("{} :{}".format(station.number, self.STRINGS['station_number']))
expected_lines += page_header
# Header for table of voter names
# In the PDF these are in table cells so they're separate from one another; to my code
# it looks as if they're adjacent.
params = (self.STRINGS['voted'], self.STRINGS['the_names'], self.STRINGS['number'])
expected_lines.append("{}{}{}".format(*params))
# Voter data
voters = station.roll[:settings.ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_BOOK]
f = lambda voter: '{}{}'.format(apply_bidi(reshape(format_name(voter))),
voter.registrant_number)
expected_lines += [f(voter) for voter in voters]
# Footer, including page #
expected_lines.append(mf_string)
expected_lines.append("2 / 1")
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_station_book(self.filename, station)
self.assertEqual(n_pages, 3)
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
# Test last page.
expected_lines = []
expected_lines += page_header
# Header for table of voter names
# In the PDF these are in table cells so they're separate from one another; to my code
# it looks as if they're adjacent.
params = (self.STRINGS['voted'], self.STRINGS['the_names'], self.STRINGS['number'])
expected_lines.append("{}{}{}".format(*params))
# Voter data
# Get the voters for the last page. Negative slicing rocks!
n_voters = len(self.voter_roll)
n_last_page_voters = n_voters % settings.ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_BOOK
voters = station.roll[-n_last_page_voters:]
f = lambda voter: '{}{}'.format(apply_bidi(reshape(format_name(voter))),
voter.registrant_number)
expected_lines += list(map(f, voters))
# Footer, including page #
expected_lines.append(mf_string)
expected_lines.append("2 / 2")
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_station_book(self.filename, station)
self.assertEqual(n_pages, 3)
xml = extract_pdf_page(self.filename, 2)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_center_list_content_for_cover(self):
"""Exercises generate_pdf_center_list and checks cover content"""
stations = self.run_station_distributor(self.voter_roll, 1)
expected_lines = []
expected_lines += self.STRINGS['ed_center_list_cover']
key = 'female' if (self.gender == FEMALE) else 'male'
mf_string = self.STRINGS[key]
expected_lines.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
expected_lines.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(reshape(self.center.name))
expected_lines.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
copied_by = self.center.copied_by.all()
if self.center.copy_of:
expected_lines.append('{} :{}'.format(self.center.copy_of.center_id,
self.STRINGS['copy_of']))
elif copied_by:
copied_by = [center.center_id for center in copied_by]
copied_by = (' ' + ARABIC_COMMA).join(map(str, reversed(copied_by)))
expected_lines.append('{} :{}'.format(copied_by, self.STRINGS['copied_by_plural']))
subconstituency_id = self.center.subconstituency.id
subconstituency_name = reshape(self.center.subconstituency.name_arabic)
subconstituency_name = apply_bidi(subconstituency_name)
subconstituency = '{} / {} :{}'.format(subconstituency_name, subconstituency_id,
self.STRINGS['subconstituency_name'])
expected_lines.append(subconstituency)
# Now generate the actual PDF and compare to expected.
generate_pdf_center_list(self.filename, stations, self.gender)
xml = extract_pdf_page(self.filename, 0)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
# Did center name wrap? If so, unwrap.
if expected_lines[4].startswith(actual_lines[5]):
actual_lines = unwrap_lines(actual_lines, 4)
has_copy_info = (self.center.copy_of or self.center.copied_by)
if has_copy_info:
# Did copied_by wrap? If so, unwrap.
if expected_lines[5].startswith(actual_lines[6]):
actual_lines = unwrap_lines(actual_lines, 5)
# Did subcon name wrap? If so, unwrap.
offset = 1 if has_copy_info else 0
if len(actual_lines) >= 7 + offset:
if expected_lines[5 + offset].startswith(actual_lines[6 + offset]):
actual_lines = unwrap_lines(actual_lines, 5 + offset)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
def test_center_list_content_for_inner_pages(self):
"""Exercises generate_pdf_center_list and checks content of non-cover pages"""
stations = self.run_station_distributor(self.voter_roll, 1)
expected_lines = []
page_header = []
page_header.append(self.STRINGS['ed_list_header_prefix'])
page_header.append(self.STRINGS['ed_center_list_header'])
key = 'female' if (self.gender == FEMALE) else 'male'
mf_string = self.STRINGS[key]
page_header.append('{} :{}'.format(mf_string, self.STRINGS['gender']))
page_header.append('{} :{}'.format(self.center.center_id, self.STRINGS['center_number']))
center_name = apply_bidi(truncate_center_name(reshape(self.center.name)))
page_header.append('{} :{}'.format(center_name, self.STRINGS['center_name']))
expected_lines += page_header
# Table header
params = (self.STRINGS['station_header'], self.STRINGS['the_names'], self.STRINGS['number'])
expected_lines.append('{}{}{}'.format(*params))
# Voters
station = stations[0]
voters = station.roll[:settings.ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_LIST]
f = lambda voter: '{}{}{}'.format(station.number,
apply_bidi(reshape(format_name(voter))),
voter.registrant_number)
expected_lines += list(map(f, voters))
# Footer, including page #
expected_lines.append(mf_string)
expected_lines.append("2 / 1")
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_center_list(self.filename, stations, self.gender)
self.assertEqual(n_pages, 3)
xml = extract_pdf_page(self.filename, 1)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
# Test last page.
expected_lines = page_header
# Table header
params = (self.STRINGS['station_header'], self.STRINGS['the_names'], self.STRINGS['number'])
expected_lines.append('{}{}{}'.format(*params))
# Voter data
# Get the voters for the last page. Negative slicing rocks!
n_voters = len(self.voter_roll)
n_last_page_voters = n_voters % settings.ROLLGEN_REGISTRATIONS_PER_PAGE_POLLING_LIST
station = stations[0]
voters = station.roll[-n_last_page_voters:]
f = lambda voter: '{}{}{}'.format(station.number,
apply_bidi(reshape(format_name(voter))),
voter.registrant_number)
expected_lines += list(map(f, voters))
# Footer, including page #
expected_lines.append(mf_string)
expected_lines.append("2 / 2")
# Now generate the actual PDF and compare to expected.
n_pages = generate_pdf_center_list(self.filename, stations, self.gender)
self.assertEqual(n_pages, 3)
xml = extract_pdf_page(self.filename, 2)
textlines = extract_textlines(xml)
actual_lines = clean_textlines(textlines)
self.assertEqual(expected_lines, actual_lines)
for textline in textlines:
self.assertCorrectFontsInUse(textline)
class TestCopiedBy(TestGeneratePdfEdContent):
"""A copy of TestGeneratePdfEdContent, but run with different data.
This class uses a center that has one copy in order to exercise the copied_by code branch.
"""
def setUp(self):
super(TestCopiedBy, self).setUp()
self.center = self.original_center
# The methods below aren't affected by the copy_of/copied_by code so we don't need them
# to do anything here.
def test_sign_content_unisex(self):
pass
def test_station_book_content_for_inner_pages(self):
pass
def test_station_list_content_for_inner_pages(self):
pass
class TestCopyOfCenter(TestGeneratePdfEdContent):
"""A copy of TestGeneratePdfEdContent, but run with different data.
This class uses a center that is a copy in order to exercise the copy_of code branch.
"""
def setUp(self):
super(TestCopyOfCenter, self).setUp()
# Any of the copy centers will do.
self.center = self.copy_centers[2]
# The methods below aren't affected by the copy_of/copied_by code so we don't need them
# to do anything here.
def test_sign_content_unisex(self):
pass
def test_station_book_content_for_inner_pages(self):
pass
def test_station_list_content_for_inner_pages(self):
pass
| apache-2.0 | -3,944,757,570,715,039,000 | 40.74876 | 100 | 0.633067 | false |
junhe1026/blog-related-codes | PythonTipChallenge/gcd_lcm_reverse.py | 1 | 1197 | # -*- coding: utf-8 -*-
"""
A problem we often meet gives you two numbers and asks for their greatest common divisor and least common multiple.
Today we go the other way round: given two numbers a and b, work out which two numbers have a as their greatest common divisor and b as their least common multiple.
Output those two numbers, the smaller one first and the larger one second, separated by a space. If there are several solutions, output the pair whose sum is smallest.
Note: the given data always has a solution, so there is no need to handle the unsolvable case.
a = gcd(x,y)
b = lcm(x,y) = x*y/gcd(x,y) = x*y/a
-> x*y = a*b
"""
import math
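# Worked example: gcd = 6, lcm = 858 -> x*y = 5148; the valid pair with the
# smallest sum is (66, 78), since gcd(66, 78) = 6 and lcm(66, 78) = 858.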
def find_x_y(a, b):
    # a is the GCD and b is the LCM, so any valid pair (x, y) satisfies x*y = a*b.
    product = a * b
    match_num = []
    for i in range(1, int(math.sqrt(product)) + 1):
        # Keep only factor pairs whose GCD really is a (their LCM is then b).
        if product % i == 0 and math.gcd(i, product // i) == a:
            match_num.append((i, product // i))
    # Among the valid pairs, pick the one with the smallest sum.
    min_num = min(match_num, key=sum)
    print(min_num[0], ' ', min_num[1])
find_x_y(6, 858)
"""A more optimized solution: search downward from the square root, which needs far fewer loop iterations."""
a = 858
b = 6
k = max(a, b) // min(a, b)   # lcm divided by gcd
i = int(k ** 0.5)
while i > 0:
    # i and k // i must be coprime so that the pair's GCD is exactly min(a, b).
    if k % i == 0 and math.gcd(i, k // i) == 1:
        print("%d %d" % (i * min(a, b), k * min(a, b) // i))
break
i -= 1
| mit | 7,017,591,944,112,823,000 | 17.829787 | 50 | 0.517514 | false |
radamizell/WallApp | location/views.py | 1 | 2728 | from django.shortcuts import render, get_object_or_404,redirect
from django.views import View, generic
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from star_ratings.models import Rating
from django.core.exceptions import PermissionDenied
from django.db.models import F
from django.contrib.gis.measure import D
from django.contrib.gis.geos import Point
from .models import Places
from .forms import PostForm
import magic
# import audiotools
#CRUD
@login_required(login_url='/accounts/login/')
def post_create(request):
form= PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, 'Successfully Created')
return HttpResponseRedirect('/')
context= {
'form': form,
}
return render(request, 'location/post_form.html',context,)
def post_detail(request,id= None):
instance= get_object_or_404(Places, id=id)
context= {
'title': instance.title,
'instance': instance,
}
return render(request,'location/post_detail.html',context)
def post_update(request,id=None):
instance= get_object_or_404(Places, id=id)
form= PostForm(request.POST or None, request.FILES or None, instance=instance)
if form.is_valid():
instance= form.save(commit=False)
instance.save()
messages.success(request,'Saved')
#success
return HttpResponseRedirect(instance.get_absolute_url())
context= {
'title': instance.title,
'instance': instance,
'form': form,
}
return render(request, 'location/post_form.html', context)
def post_delete(request, id=None):
instance= get_object_or_404(Places, id=id)
if request.user.username == instance.usersave:
instance.delete()
messages.success(request, 'Success')
else:
raise PermissionDenied()
return redirect('posts:list')
def fetch_places(request):
finder_location = Point(-83,33)
nearby= Places.objects.filter(
location__distance_lte=(
finder_location,
D(km=40))).distance(finder_location).order_by('distance')[:10]
context= {
'object_listboy': nearby,
'title': 'wall',
}
return render(request, 'location/wall.html', context)
def fetch_places_loc(request):
lat= request.GET['latitude']
lon= request.GET['longitude']
finder_location = Point(float(lon),float(lat))
nearby= Places.objects.filter(
location__distance_lte=(
finder_location,
            D(km=40))).distance(finder_location).order_by('-rating__average', 'distance')[:10]
context= {
'object_listboy': nearby,
'title': 'wall',
}
return render(request, 'location/wall.html', context)
| mit | 881,329,425,679,484,500 | 20.650794 | 94 | 0.732038 | false |
jad-b/Crank | crank/core/tests/test_workouts.py | 1 | 1048 | import json
import os
from crank.core.workouts import (Workouts, WorkoutsJSONEncoder,
WorkoutsJSONDecoder)
parent = os.path.dirname(os.path.abspath(__file__))
TEST_WKT_FILE = os.path.join(parent, 'fixtures', 'squat.wkt')
def test_workouts_storage():
"""Parse, save, and load workouts from file(s)."""
wkts = Workouts.parse_wkt_file(TEST_WKT_FILE)
assert len(wkts.workouts) == 43
wkts_filename = 'workouts.json.test'
wkts.filename = wkts_filename
wkts.save()
assert os.path.exists(wkts_filename)
del wkts
wkts2 = Workouts.load(wkts_filename)
assert len(wkts2.workouts) == 43, wkts2.workouts
assert not isinstance(wkts2.workouts, list), \
"Workouts shouldn't be in a list"
def test_workouts_encoding():
wkts = Workouts.parse_wkt_file(TEST_WKT_FILE)
wkts_json = json.dumps(wkts, cls=WorkoutsJSONEncoder)
wkts2 = json.loads(wkts_json, cls=WorkoutsJSONDecoder)
assert wkts.filename == wkts2.filename
assert wkts.workouts == wkts2.workouts
| mit | -806,672,521,524,367,100 | 29.823529 | 63 | 0.681298 | false |
richbrowne/f5-openstack-agent | test/functional/neutronless/esd/test_esd_pools.py | 1 | 9812 | # coding=utf-8
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver import \
iControlDriver
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
import json
import logging
import os
import pytest
import requests
from ..testlib.bigip_client import BigIpClient
from ..testlib.fake_rpc import FakeRPCPlugin
from ..testlib.service_reader import LoadbalancerReader
from ..testlib.resource_validator import ResourceValidator
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
def get_services(filename):
neutron_services_filename = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
filename)
)
return (json.load(open(neutron_services_filename)))
def get_fake_plugin_rpc(services):
rpcObj = FakeRPCPlugin(services)
return rpcObj
def get_icontrol_driver(icd_config, fake_plugin_rpc, esd, bigip):
class ConfFake(object):
def __init__(self, params):
self.__dict__ = params
for k, v in self.__dict__.items():
if isinstance(v, unicode):
self.__dict__[k] = v.encode('utf-8')
def __repr__(self):
return repr(self.__dict__)
icd = iControlDriver(ConfFake(icd_config),
registerOpts=False)
icd.plugin_rpc = fake_plugin_rpc
icd.connect()
esd.process_esd(icd.get_all_bigips())
icd.lbaas_builder.init_esd(esd)
icd.service_adapter.init_esd(esd)
return icd
@pytest.fixture()
def icd_config():
oslo_config_filename = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../config/overcloud_basic_agent_config.json')
)
OSLO_CONFIGS = json.load(open(oslo_config_filename))
config = deepcopy(OSLO_CONFIGS)
config['icontrol_hostname'] = pytest.symbols.bigip_mgmt_ip_public
config['icontrol_username'] = pytest.symbols.bigip_username
config['icontrol_password'] = pytest.symbols.bigip_password
return config
@pytest.fixture(scope="module")
def bigip():
return BigIpClient(pytest.symbols.bigip_mgmt_ip_public,
pytest.symbols.bigip_username,
pytest.symbols.bigip_password)
@pytest.fixture
def esd():
esd_dir = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../../etc/neutron/services/f5/esd')
)
return EsdTagProcessor(esd_dir)
@pytest.fixture
def esd_json():
esd_file = (
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../../etc/neutron/services/f5/esd/demo.json')
)
return (json.load(open(esd_file)))
def test_esd_pools(track_bigip_cfg, bigip, icd_config, demo_policy, esd,
esd_json):
env_prefix = icd_config['environment_prefix']
services = get_services('../../testdata/service_requests/l7_esd_pools.json')
fake_plugin_rpc = get_fake_plugin_rpc(services)
icontrol_driver = get_icontrol_driver(icd_config, fake_plugin_rpc, esd, bigip)
service_iter = iter(services)
validator = ResourceValidator(bigip, env_prefix)
# create loadbalancer
# lbaas-loadbalancer-create --name lb1 mgmt_v4_subnet
service = service_iter.next()
lb_reader = LoadbalancerReader(service)
folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
icontrol_driver._common_service_handler(service)
assert bigip.folder_exists(folder)
# create listener
# lbaas-listener-create --name l1 --loadbalancer lb1 --protocol HTTP
# --protocol-port 80
service = service_iter.next()
listener = service['listeners'][0]
icontrol_driver._common_service_handler(service)
validator.assert_virtual_valid(listener, folder)
# create pool with session persistence
# lbaas-pool-create --name p1 --listener l1 --protocol HTTP
# --lb-algorithm ROUND_ROBIN --session-persistence type=SOURCE_IP
service = service_iter.next()
pool = service['pools'][0]
icontrol_driver._common_service_handler(service)
validator.assert_pool_valid(pool, folder)
# apply ESD
# lbaas-l7policy-create --name esd_demo_1 --listener l1 --action REJECT
# --description "Override pool session persistence"
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_applied(esd_json['esd_demo_1'], listener, folder)
# delete pool
# lbaas-pool-delete p1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_deleted(pool, None, folder)
# deleting pool should NOT remove ESD
validator.assert_esd_applied(esd_json['esd_demo_1'], listener, folder)
# delete ESD
# lbaas-l7policy-delete esd_demo_1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_removed(esd_json['esd_demo_1'], listener, folder)
# create new ESD
# lbaas-l7policy-create --name esd_demo_1 --listener l1 --action REJECT
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_applied(esd_json['esd_demo_1'], listener, folder)
# create pool with session persistence
# lbaas-pool-create --name p1 --listener l1 --protocol HTTP
# --lb-algorithm ROUND_ROBIN --session-persistence type=SOURCE_IP
# --description "Should not override ESD session persistence"
service = service_iter.next()
icontrol_driver._common_service_handler(service)
pool = service['pools'][0]
validator.assert_pool_valid(pool, folder)
# creating pool should NOT remove ESD
validator.assert_esd_applied(esd_json['esd_demo_1'], listener, folder)
# delete pool
# lbaas-pool-delete p1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_pool_deleted(pool, None, folder)
# delete ESD
# lbaas-l7policy-delete esd_demo_1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_removed(esd_json['esd_demo_1'], listener, folder)
# delete listener
# lbaas-listener-delete l1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_virtual_deleted(listener, folder)
# delete loadbalancer
# neutron lbaas-loadbalancer-delete lb1
service = service_iter.next()
icontrol_driver._common_service_handler(service, delete_partition=True)
assert not bigip.folder_exists(folder)
def test_multiple_esd_add_remove(track_bigip_cfg, bigip, icd_config, demo_policy,
esd, esd_json):
services = get_services('../../testdata/service_requests/'
'l7_multiple_esd_add_remove.json')
fake_plugin_rpc = get_fake_plugin_rpc(services)
icontrol_driver = get_icontrol_driver(icd_config, fake_plugin_rpc, esd, bigip)
env_prefix = icd_config['environment_prefix']
service_iter = iter(services)
validator = ResourceValidator(bigip, env_prefix)
# create loadbalancer
# lbaas-loadbalancer-create --name lb1 admin_subnet
service = service_iter.next()
lb_reader = LoadbalancerReader(service)
folder = '{0}_{1}'.format(env_prefix, lb_reader.tenant_id())
icontrol_driver._common_service_handler(service)
assert bigip.folder_exists(folder)
# create listener
# lbaas-listener-create --name listener1 --loadbalancer lb1 --protocol HTTP
# --protocol-port 80
service = service_iter.next()
listener = service['listeners'][0]
icontrol_driver._common_service_handler(service)
validator.assert_virtual_valid(listener, folder)
# apply ESD1
# lbaas-l7policy-create --name esd_demo_1 --listener listener1 --action REJECT
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# apply ESD2
# lbaas-l7policy-create --name esd_demo_2 --listener listener1 --action REJECT
service = service_iter.next()
icontrol_driver._common_service_handler(service)
# validate ESD1 applied
validator.assert_esd_applied(esd_json['esd_demo_1'], listener, folder)
# delete ESD1 and check if ESD2 is still applied
# lbaas-l7policy-delete esd_demo_1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_applied(esd_json['esd_demo_2'], listener, folder)
# delete ESD2
# lbaas-l7policy-delete esd_demo_1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_esd_removed(esd_json['esd_demo_1'], listener, folder)
validator.assert_esd_removed(esd_json['esd_demo_2'], listener, folder)
# delete listener
# lbaas-listener-delete listener1
service = service_iter.next()
icontrol_driver._common_service_handler(service)
validator.assert_virtual_deleted(listener, folder)
# delete loadbalancer
# neutron lbaas-loadbalancer-delete lb1
service = service_iter.next()
icontrol_driver._common_service_handler(service, delete_partition=True)
assert not bigip.folder_exists(folder)
| apache-2.0 | 8,795,121,001,418,996,000 | 34.550725 | 82 | 0.685691 | false |
VictorLowther/swift | swift/obj/auditor.py | 1 | 10106 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from eventlet import Timeout
from swift.obj import server as object_server
from swift.common.utils import get_logger, audit_location_generator, \
ratelimit_sleep, TRUE_VALUES, dump_recon_cache
from swift.common.exceptions import AuditException, DiskFileError, \
DiskFileNotExist
from swift.common.daemon import Daemon
SLEEP_BETWEEN_AUDITS = 30
class AuditorWorker(object):
"""Walk through file system to audit object"""
def __init__(self, conf, zero_byte_only_at_fps=0):
self.conf = conf
self.logger = get_logger(conf, log_route='object-auditor')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
TRUE_VALUES
self.max_files_per_second = float(conf.get('files_per_second', 20))
self.max_bytes_per_second = float(conf.get('bytes_per_second',
10000000))
self.auditor_type = 'ALL'
self.zero_byte_only_at_fps = zero_byte_only_at_fps
if self.zero_byte_only_at_fps:
self.max_files_per_second = float(self.zero_byte_only_at_fps)
self.auditor_type = 'ZBF'
self.log_time = int(conf.get('log_time', 3600))
self.files_running_time = 0
self.bytes_running_time = 0
self.bytes_processed = 0
self.total_bytes_processed = 0
self.total_files_processed = 0
self.passes = 0
self.quarantines = 0
self.errors = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
def audit_all_objects(self, mode='once'):
self.logger.info(_('Begin object audit "%s" mode (%s)' %
(mode, self.auditor_type)))
begin = reported = time.time()
self.total_bytes_processed = 0
self.total_files_processed = 0
total_quarantines = 0
total_errors = 0
time_auditing = 0
all_locs = audit_location_generator(self.devices,
object_server.DATADIR,
mount_check=self.mount_check,
logger=self.logger)
for path, device, partition in all_locs:
loop_time = time.time()
self.object_audit(path, device, partition)
self.logger.timing_since('timing', loop_time)
self.files_running_time = ratelimit_sleep(
self.files_running_time, self.max_files_per_second)
self.total_files_processed += 1
now = time.time()
if now - reported >= self.log_time:
self.logger.info(_(
'Object audit (%(type)s). '
'Since %(start_time)s: Locally: %(passes)d passed, '
'%(quars)d quarantined, %(errors)d errors '
'files/sec: %(frate).2f , bytes/sec: %(brate).2f, '
'Total time: %(total).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f') % {
'type': self.auditor_type,
'start_time': time.ctime(reported),
'passes': self.passes, 'quars': self.quarantines,
'errors': self.errors,
'frate': self.passes / (now - reported),
'brate': self.bytes_processed / (now - reported),
'total': (now - begin), 'audit': time_auditing,
'audit_rate': time_auditing / (now - begin)})
dump_recon_cache({'object_auditor_stats_%s' %
self.auditor_type: {
'errors': self.errors,
'passes': self.passes,
'quarantined': self.quarantines,
'bytes_processed': self.bytes_processed,
'start_time': reported,
'audit_time': time_auditing}},
self.rcache, self.logger)
reported = now
total_quarantines += self.quarantines
total_errors += self.errors
self.passes = 0
self.quarantines = 0
self.errors = 0
self.bytes_processed = 0
time_auditing += (now - loop_time)
# Avoid divide by zero during very short runs
elapsed = (time.time() - begin) or 0.000001
self.logger.info(_(
'Object audit (%(type)s) "%(mode)s" mode '
'completed: %(elapsed).02fs. Total quarantined: %(quars)d, '
'Total errors: %(errors)d, Total files/sec: %(frate).2f , '
'Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, '
'Rate: %(audit_rate).2f') % {
'type': self.auditor_type, 'mode': mode, 'elapsed': elapsed,
'quars': total_quarantines, 'errors': total_errors,
'frate': self.total_files_processed / elapsed,
'brate': self.total_bytes_processed / elapsed,
'audit': time_auditing, 'audit_rate': time_auditing / elapsed})
def object_audit(self, path, device, partition):
"""
Audits the given object path.
:param path: a path to an object
:param device: the device the path is on
:param partition: the partition the path is on
"""
try:
if not path.endswith('.data'):
return
try:
name = object_server.read_metadata(path)['name']
except (Exception, Timeout), exc:
raise AuditException('Error when reading metadata: %s' % exc)
_junk, account, container, obj = name.split('/', 3)
df = object_server.DiskFile(self.devices, device, partition,
account, container, obj, self.logger,
keep_data_fp=True)
try:
if df.data_file is None:
# file is deleted, we found the tombstone
return
try:
obj_size = df.get_data_file_size()
except DiskFileError, e:
raise AuditException(str(e))
except DiskFileNotExist:
return
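                # In zero-byte-only (ZBF) mode, non-empty files are counted as
                # passed without reading their contents.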
if self.zero_byte_only_at_fps and obj_size:
self.passes += 1
return
for chunk in df:
self.bytes_running_time = ratelimit_sleep(
self.bytes_running_time, self.max_bytes_per_second,
incr_by=len(chunk))
self.bytes_processed += len(chunk)
self.total_bytes_processed += len(chunk)
df.close()
if df.quarantined_dir:
self.quarantines += 1
self.logger.error(
_("ERROR Object %(path)s failed audit and will be "
"quarantined: ETag and file's md5 do not match"),
{'path': path})
finally:
df.close(verify_file=False)
except AuditException, err:
self.logger.increment('quarantines')
self.quarantines += 1
self.logger.error(_('ERROR Object %(obj)s failed audit and will '
'be quarantined: %(err)s'),
{'obj': path, 'err': err})
object_server.quarantine_renamer(
os.path.join(self.devices, device), path)
return
except (Exception, Timeout):
self.logger.increment('errors')
self.errors += 1
self.logger.exception(_('ERROR Trying to audit %s'), path)
return
self.passes += 1
class ObjectAuditor(Daemon):
"""Audit objects."""
def __init__(self, conf, **options):
self.conf = conf
self.logger = get_logger(conf, log_route='object-auditor')
self.conf_zero_byte_fps = int(
conf.get('zero_byte_files_per_second', 50))
def _sleep(self):
time.sleep(SLEEP_BETWEEN_AUDITS)
def run_forever(self, *args, **kwargs):
"""Run the object audit until stopped."""
# zero byte only command line option
zbo_fps = kwargs.get('zero_byte_fps', 0)
if zbo_fps:
# only start parent
parent = True
else:
parent = os.fork() # child gets parent = 0
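        # When forked, the parent runs the fast zero-byte-file (ZBF) audit while
        # the child runs the regular full audit.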
kwargs = {'mode': 'forever'}
if parent:
kwargs['zero_byte_fps'] = zbo_fps or self.conf_zero_byte_fps
while True:
try:
self.run_once(**kwargs)
except (Exception, Timeout):
self.logger.exception(_('ERROR auditing'))
self._sleep()
def run_once(self, *args, **kwargs):
"""Run the object audit once."""
mode = kwargs.get('mode', 'once')
zero_byte_only_at_fps = kwargs.get('zero_byte_fps', 0)
worker = AuditorWorker(self.conf,
zero_byte_only_at_fps=zero_byte_only_at_fps)
worker.audit_all_objects(mode=mode)
| apache-2.0 | -6,427,883,266,433,169,000 | 42.93913 | 79 | 0.517613 | false |
centaurialpha/pireal | pireal/core/interpreter/exceptions.py | 1 | 2326 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2017 - Gabriel Acosta <[email protected]>
#
# This file is part of Pireal.
#
# Pireal is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Pireal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pireal; If not, see <http://www.gnu.org/licenses/>.
# This module is responsible for organizing the pieces called "tokens";
# each of these tokens has a meaning in the language.
class InterpreterError(Exception):
    """ Base exception for errors raised by the interpreter """
class MissingQuoteError(InterpreterError):
    """ Exception for missing quotation marks in strings """
def __init__(self, msg, lineno, col):
InterpreterError.__init__(self, msg.format(lineno))
self.lineno = lineno - 1
self.column = col
class InvalidSyntaxError(InterpreterError):
    """ Exception for syntax errors raised by the Lexer """
def __init__(self, lineno, col, char, msg="Invalid syntax on '{0}':'{1}'"):
InterpreterError.__init__(self, msg.format(lineno, col))
self.lineno = lineno
self.column = col
self.character = "<b>" + char + "</b>"
class ConsumeError(InterpreterError):
    """ Exception for errors raised by the Parser when an unexpected
    language symbol is encountered """
def __init__(self, expected, got, lineno, msg=None):
if msg is None:
            msg = (f"It is expected to find '{expected}', "
                   f"but '{got}' was found on line: '{lineno}'")
super().__init__(msg)
self.expected = expected
self.got = got
self.lineno = lineno
class DuplicateRelationNameError(InterpreterError):
    """ Exception for errors raised by the Interpreter when a name
    that already exists in the SCOPE is used """
def __init__(self, rname):
super().__init__()
self.rname = rname
| gpl-3.0 | 8,379,900,348,257,351,000 | 33.58209 | 79 | 0.672421 | false |
ghetzel/webfriend | webfriend/rpc/network.py | 1 | 5261 | from __future__ import absolute_import
from webfriend.rpc import Base
from datetime import datetime
from base64 import b64decode
class Cookie(object):
def __init__(self, rpc, definition):
self._rpc = rpc
self._definition = definition
self.name = definition['name']
self.value = definition['value']
self.domain = definition.get('domain')
self.path = definition.get('path')
self.expires_epoch = definition.get('expires')
self.size = definition.get('size')
self.http_only = definition.get('httpOnly')
self.secure = definition.get('secure')
self.session = definition.get('session')
self.same_site = definition.get('sameSite')
if self.expires_epoch is not None:
self.expires = datetime.fromtimestamp(self.expires_epoch / 1e3)
def as_dict(self):
return dict([
(k, v) for k, v in self.__dict__.items() if not k.startswith('_')
])
class Network(Base):
"""
See: https://chromedevtools.github.io/devtools-protocol/tot/Network
"""
domain = 'Network'
connection_types = [
'none', 'cellular2g', 'cellular3g', 'cellular4g', 'bluetooth', 'ethernet', 'wifi', 'wimax',
'other'
]
def set_user_agent(self, user_agent):
self.call('setUserAgentOverride', userAgent=user_agent)
def set_headers(self, headers):
if not isinstance(headers, dict):
raise AttributeError("Headers must be specified as a dict")
self.call('setExtraHTTPHeaders', headers=headers)
def get_response_body(self, request_id):
reply = self.call('getResponseBody', requestId=request_id)
body = reply.get('body')
if not body:
return None
if reply.get('base64Encoded') is True:
body = b64decode(body)
return body
@property
def can_clear_browser_cache(self):
return self.call_boolean_response('canClearBrowserCache')
@property
def can_clear_browser_cookies(self):
return self.call_boolean_response('canClearBrowserCookies')
@property
def can_emulate_network_conditions(self):
return self.call_boolean_response('canEmulateNetworkConditions')
def clear_browser_cache(self):
self.call('clearBrowserCache')
def clear_browser_cookies(self):
self.call('clearBrowserCookies')
def emulate_network_conditions(
self,
offline=False,
latency_ms=0,
throughput_down_bps=None,
throughput_up_bps=None,
connection_type=None,
):
params = {
'offline': offline,
'latency': latency_ms,
            'downloadThroughput': throughput_down_bps if throughput_down_bps is not None else 10e9,
            'uploadThroughput': throughput_up_bps if throughput_up_bps is not None else 10e9,
}
if connection_type is not None:
if connection_type not in self.connection_types:
raise AttributeError("Connection Type must be one of: {}".format(
', '.join(self.connection_types)
))
params['connectionType'] = connection_type
self.call('emulateNetworkConditions', **params)
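        # Example (illustrative): roughly simulate a slow mobile link:
        #   network.emulate_network_conditions(latency_ms=300,
        #                                      throughput_down_bps=750 * 1024,
        #                                      connection_type='cellular3g')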
def disable_cache(self):
self.call('setCacheDisabled', cacheDisabled=True)
def enable_cache(self):
self.call('setCacheDisabled', cacheDisabled=False)
def set_blocked_urls(self, urls):
if not isinstance(urls, list):
raise AttributeError("Blocked URLs must be a list")
self.call('setBlockedURLs', urls=urls)
def replay_xhr(self, request_id):
self.call('replayXHR', requestId=request_id)
def get_cookies(self, urls=None):
if isinstance(urls, list):
reply = self.call('getCookies', urls=urls)
else:
reply = self.call('getAllCookies')
return [
Cookie(self, c) for c in reply.get('cookies', [])
]
def delete_cookie(self, url, name):
self.call('deleteCookie', cookieName=name, url=url)
def set_cookie(
self,
url,
name,
value,
domain=None,
path=None,
secure=None,
http_only=None,
same_site=None,
expires=None
):
"""
Create or update a cookie based on the given values.
"""
params = {
'url': url,
'name': name,
'value': value,
}
if domain is not None:
params['domain'] = domain
if path is not None:
params['path'] = path
if isinstance(secure, bool):
params['secure'] = secure
if isinstance(http_only, bool):
params['httpOnly'] = http_only
if isinstance(same_site, basestring):
params['sameSite'] = same_site
if isinstance(expires, int):
params['expirationDate'] = expires
elif isinstance(expires, datetime):
params['expirationDate'] = int(expires.strftime('%s'))
return self.call_boolean_response(self.call('setCookie', **params), 'success')
def get_certificate(self, origin):
# getCertificate
        raise NotImplementedError("NOT IMPLEMENTED")
| bsd-2-clause | -5,181,091,513,582,721,000 | 28.55618 | 99 | 0.578787 | false |
SmartDataAnalytics/LC-QuAD | utils/natural_language_utilities.py | 1 | 7104 | """
@TODO: major rewrite here!
"""
import re
import html
import string
import os.path
import inflect
import warnings
import validators
from urllib.parse import urlparse
# SOME MACROS
STOPWORDLIST = 'resources/stopwords.txt'
KNOWN_SHORTHANDS = ['dbo', 'dbp', 'rdf', 'rdfs', 'dbr', 'foaf', 'geo', 'res', 'dct']
DBP_SHORTHANDS = {'dbo': 'http://dbpedia.org/ontology/', 'dbp': 'http://dbpedia.org/property/',
'dbr': 'http://dbpedia.org/resource/', 'res': 'http://dbpedia.org/resource/'}
# @TODO Import the above list from http://dbpedia.org/sparql?nsdecl
p = inflect.engine()
# Few regex to convert camelCase to _ i.e DonaldTrump to donald trump
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
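# variable_regex (below) matches the name inside old-style "%(name)s" format placeholders.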
variable_regex = r"(?<=%\()\S*(?=\)s)"
re1 = re.compile(r' +')
stopwords = open(STOPWORDLIST).read().split('\n')
# Better warning formatting. Ignore.
def better_warning(message, category, filename, lineno, file=None, line=None):
return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)
def has_url(_string):
if validators.url(_string):
return True
return False
def get_variables(_string):
return re.findall(variable_regex, _string, re.MULTILINE)
def tokenize(_input, _ignore_brackets=False, _remove_stopwords=False):
"""
Tokenize a question.
Changes:
- removes question marks
- removes commas
- removes trailing spaces
- can remove text inside one-level brackets.
@TODO: Improve tokenization
Used in: parser.py; krantikari.py
:param _input: str,
:param _ignore_brackets: bool
:return: list of tokens
"""
cleaner_input = _input.replace("?", "").replace(",", "").strip()
if _ignore_brackets:
# If there's some text b/w brackets, remove it. @TODO: NESTED parenthesis not covered.
pattern = r'\([^\)]*\)'
matcher = re.search(pattern, cleaner_input, 0)
if matcher:
substring = matcher.group()
cleaner_input = cleaner_input[:cleaner_input.index(substring)] + cleaner_input[
cleaner_input.index(substring) + len(
substring):]
return cleaner_input.strip().split() if not _remove_stopwords else remove_stopwords(cleaner_input.strip().split())
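# Example (illustrative): tokenize("Who wrote the book Hamlet?", _remove_stopwords=True)
# returns something like ['wrote', 'book', 'Hamlet'], assuming "who" and "the" are in
# the stopword list.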
def is_clean_url(_string):
"""
!!!! ATTENTION !!!!!
    Radical changes ahead.
"""
if validators.url(_string):
if _string[-3:-1] == '__' and _string[-1] in string.digits:
return False
if _string[-1] == ',':
return False
if 'dbpedia' not in _string:
return False
# Lets kick out all the literals too?
return True
else:
return False
def is_shorthand(_string):
splitted_string = _string.split(':')
if len(splitted_string) == 1:
return False
if splitted_string[0] in KNOWN_SHORTHANDS:
# Validate the right side of the ':'
if '/' in splitted_string[1]:
return False
return True
return False
def is_type_constraint(_string, _convert_shorthand = False):
_string = _string.strip().lower().replace('<','').replace('>','')
type_constraint = False
if _string == 'a':
type_constraint = True
if _string == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type':
type_constraint = True
if _string == 'rdf:type':
type_constraint = True
if type_constraint:
return 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type' if _convert_shorthand else True
else:
return '' if _convert_shorthand else False
def is_dbpedia_uri(_string):
# Check if it is a DBpedia shorthand
if is_dbpedia_shorthand(_string=_string, _convert=False):
return True
elif _string.startswith('http://dbpedia.org/'):
return True
return False
def is_dbpedia_shorthand(_string, _convert=True):
if not is_shorthand(_string):
return _string if _convert else False
splitted_string = _string.split(':')
if len(splitted_string) == 1:
        warnings.warn("Invalid string: %s\n"
                      "Please check it yourself, and extrapolate what breaks!" % _string)
return _string if _convert else False
if splitted_string[0] in DBP_SHORTHANDS.keys():
# Validate the right side of the ':'
if '/' in splitted_string[1]:
            warnings.warn("Invalid string: %s\n"
                          "Please check it yourself, and extrapolate what breaks!" % _string)
return _string if _convert else False
return ''.join([DBP_SHORTHANDS[splitted_string[0]], splitted_string[1]]) if _convert else True
return _string if _convert else False
def is_literal(_string):
# Very rudimentary logic. Make it better sometime later.
if has_url(_string) or is_shorthand(_string):
return False
return True
def convert(_string):
s1 = first_cap_re.sub(r'\1_\2', _string)
return all_cap_re.sub(r'\1_\2', s1)
def get_label_via_parsing(_uri, lower=False):
# Sanity strip: remove all '<' and '>' from here
_uri = _uri.replace('<', '')
_uri = _uri.replace('>', '')
parsed = urlparse(_uri)
path = os.path.split(parsed.path)
unformated_label = path[-1]
label = convert(unformated_label)
label = " ".join(label.split("_"))
if lower:
return label.lower()
return label
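# Example: get_label_via_parsing("http://dbpedia.org/resource/DonaldTrump") returns
# "Donald Trump" (last path segment, camelCase split into words).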
def remove_stopwords(_tokens):
return [x for x in _tokens if x.strip().lower() not in stopwords]
def sq_bracket_checker(uri, reverse=True, update=True):
"""
    Checks that the uri starts with '<' and ends with '>'.
    If update=True, missing brackets are added to the uri.
    :param uri: str
    :param reverse: flag: strip the square brackets if present
    :param update: return the updated uri
:return:
"""
if uri[0] != '<':
if update:
uri = "<" + uri
else:
return False
if uri[-1] != '>':
if update:
uri = uri + ">"
else:
return False
if reverse:
return uri[1:-1]
return uri
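# Example (illustrative): with the defaults (reverse=True, update=True) angle brackets
# are normalised away, while reverse=False ensures they are present:
#   sq_bracket_checker('<http://dbpedia.org/ontology/Airport>')               -> 'http://dbpedia.org/ontology/Airport'
#   sq_bracket_checker('http://dbpedia.org/ontology/Airport', reverse=False)  -> '<http://dbpedia.org/ontology/Airport>'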
def fixup(x):
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace(
'<br />', "\n").replace('\\"', '"').replace('<unk>','u_n').replace(' @.@ ', '.').replace(
' @-@ ','-').replace('\\', ' \\ ')
return re1.sub(' ', html.unescape(x))
def get_plural(_words):
"""
Input can be one word or more. We need to get its plural form.
:param _words: str
:return: str
"""
return p.plural(_words)
if __name__ == "__main__":
uris = ["http://dbpedia.org/ontology/Airport", "http://dbpedia.org/property/garrison",
"<http://dbpedia.org/property/MohnishDubey"]
for uri in uris:
print(get_label_via_parsing(uri))
| gpl-3.0 | -3,210,064,065,391,750,700 | 27.190476 | 118 | 0.574324 | false |
zozo123/buildbot | master/buildbot/data/root.py | 1 | 1704 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.data import base
from buildbot.data import types
from twisted.internet import defer
class RootEndpoint(base.Endpoint):
isCollection = True
pathPatterns = "/"
def get(self, resultSpec, kwargs):
return defer.succeed(self.master.data.rootLinks)
class Root(base.ResourceType):
name = "rootlink"
plural = "rootlinks"
endpoints = [RootEndpoint]
class EntityType(types.Entity):
name = types.String()
entityType = EntityType(name)
class SpecEndpoint(base.Endpoint):
isCollection = True
pathPatterns = "/application.spec"
def get(self, resultSpec, kwargs):
return defer.succeed(self.master.data.allEndpoints())
class Spec(base.ResourceType):
name = "spec"
plural = "specs"
endpoints = [SpecEndpoint]
class EntityType(types.Entity):
path = types.String()
type = types.String()
plural = types.String()
type_spec = types.JsonObject()
entityType = EntityType(name)
| gpl-3.0 | 673,381,087,221,038,600 | 28.894737 | 79 | 0.714202 | false |
osroom/osroom | apps/modules/user/process/avatar_upload.py | 1 | 3766 | #!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
import base64
import json
import os
from flask import request
from flask_babel import gettext
from flask_login import current_user
from apps.configs.sys_config import APPS_PATH
from apps.modules.user.process.get_or_update_user import get_one_user, update_one_user
from apps.modules.user.process.user_profile_process import delete_user_info_cache
from apps.utils.image.image import ImageCompression
from apps.utils.upload.file_up import file_up, file_del, fileup_base_64
from apps.app import mdbs
from apps.core.utils.get_config import get_config
from apps.utils.upload.get_filepath import get_file_url
def avatar_upload():
"""
    Avatar upload.
:return:
"""
result = None
imgfile_base = request.argget.all("imgfile_base")
max_size_mb = get_config("account", "USER_AVATAR_MAX_SIZE")
max_size_b = max_size_mb * 1024 * 1024
if imgfile_base:
if len(imgfile_base) > max_size_b:
data = {
"msg": gettext(
"Upload avatar image can not exceed {}M".format(max_size_mb)),
"msg_type": "w",
"custom_status": 413}
return data
else:
result = fileup_base_64(
uploaded_files=[imgfile_base],
prefix="user_avatar/")
else:
file = request.files['upfile']
        size = len(file.read())
        # Rewind the stream after the size check so the actual upload reads the full file.
        file.seek(0)
        if size > max_size_b:
data = {
"msg": gettext(
"Upload avatar image can not exceed {}M".format(max_size_mb)),
"msg_type": "w",
"custom_status": 413}
return data
if file:
tailoring = request.argget.all('tailoring')
if tailoring:
if not isinstance(tailoring, dict):
tailoring = json.loads(tailoring)
for k in ["width", "height", "x", "y", "rotate"]:
tailoring.setdefault(k, 0)
result = file_up(
uploaded_files=[file],
prefix="user_avatar/",
tailoring=tailoring)
data = {}
if result:
result = result[0]
user = get_one_user(user_id=current_user.str_id)
if user:
if user['avatar_url'] and "key" in user['avatar_url'] \
and result["key"] != user['avatar_url']["key"]:
                # If the new avatar was stored under a different key, delete the old one
file_del(user['avatar_url'])
update_data = {
"avatar_url": result
}
r = update_one_user(
user_id=current_user.str_id, updata={
"$set": update_data})
if not r.matched_count:
data = {
'msg': gettext("Save failed"),
'msg_type': "w",
"custom_status": 400}
else:
if result["type"] == "local":
                    # If stored locally, resize the saved image to the configured avatar size
path = "{}{}".format(APPS_PATH, get_file_url(result))
imgcp = ImageCompression(path, path)
ava_size = get_config("account", "USER_AVATAR_SIZE")
imgcp.custom_pixels(ava_size[0], ava_size[1])
data = {
'msg': gettext("Save successfully"),
'msg_type': "s",
"custom_status": 201}
if not data:
data = {
'msg': gettext("Upload failed"),
'msg_type': "w",
"custom_status": 400}
    # Clear the cached user info
delete_user_info_cache(user_id=current_user.str_id)
return data
| bsd-2-clause | 848,355,737,162,953,000 | 33.679245 | 86 | 0.515234 | false |
antoniodemora/python-telegram-bot | docs/source/conf.py | 1 | 9334 | # -*- coding: utf-8 -*-
#
# Python Telegram Bot documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 10 22:25:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Telegram Bot'
copyright = u'2015, Leandro Toledo'
author = u'Leandro Toledo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.8'
# The full version, including alpha/beta/rc tags.
release = '2.8.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonTelegramBotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PythonTelegramBot.tex', u'Python Telegram Bot Documentation',
u'Leandro Toledo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythontelegrambot', u'Python Telegram Bot Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonTelegramBot', u'Python Telegram Bot Documentation',
author, 'PythonTelegramBot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | 4,470,365,260,915,227,600 | 31.522648 | 79 | 0.709128 | false |
WebCampZg/conference-web | workshops/models.py | 1 | 2590 | from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.db.models.deletion import PROTECT
from django_extensions.db.fields import AutoSlugField
class Workshop(models.Model):
event = models.ForeignKey('events.Event', on_delete=PROTECT, related_name='workshops')
applicants = models.ManyToManyField('cfp.Applicant')
application = models.OneToOneField(
'cfp.PaperApplication', null=True, on_delete=PROTECT, related_name='workshop')
title = models.CharField(max_length=80)
slug = AutoSlugField(populate_from="title", unique=True)
about = models.TextField()
abstract = models.TextField()
extra_info = models.TextField(blank=True)
skill_level = models.ForeignKey('cfp.AudienceSkillLevel', on_delete=PROTECT)
starts_at = models.DateTimeField(null=True, blank=True)
duration_hours = models.DecimalField(max_digits=3, decimal_places=1)
tickets_link = models.URLField(blank=True)
price = models.PositiveIntegerField(blank=True, null=True)
published = models.BooleanField(default=True)
sold_out = models.BooleanField(default=False)
rate_url = models.URLField(blank=True)
joindin_url = models.URLField(blank=True, help_text="URL to the event on JoindIn API.")
@property
def approximate_euro_price(self):
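        # Rough conversion, assuming prices are stored in HRK (~7.5 HRK per EUR).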
return int(self.price / 7.5) if self.price else None
def applicant_names(self):
return [a.full_name for a in self.applicants.all()]
def page_title(self):
return "{}: {}".format(", ".join(self.applicant_names()), self.title)
def random_applicant(self):
return self.applicants.order_by('?').first()
def image(self):
applicant = self.random_applicant()
return applicant.image if applicant else None
def image_url(self):
image = self.image()
return image.url if image else static("images/placeholder.png")
def update_from_application(self):
"""
        Copies over the workshop details from its application.
Used when the user updates the application, to reflect the changes on
the talk. Does not change the slug to keep the link the same, this
should be done manually if desired.
"""
self.title = self.application.title
self.about = self.application.about
self.abstract = self.application.abstract
self.skill_level = self.application.skill_level
def __str__(self):
return self.page_title()
def __repr__(self):
return '<Workshop #{}: {}>'.format(self.pk, self.title)
| bsd-3-clause | 810,368,390,674,918,300 | 37.656716 | 91 | 0.688031 | false |
MattDevo/edk2 | BaseTools/Source/Python/CommonDataClass/CommonClass.py | 1 | 4241 | ## @file
# This file is used to define common items of class object
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
## SkuInfoClass
#
# This class defines the SkuInfo item used in Module/Platform/Package files
#
# @param object: Inherited from object class
# @param SkuIdName: Input value for SkuIdName, default is ''
# @param SkuId: Input value for SkuId, default is ''
# @param VariableName: Input value for VariableName, default is ''
# @param VariableGuid: Input value for VariableGuid, default is ''
# @param VariableOffset: Input value for VariableOffset, default is ''
# @param HiiDefaultValue: Input value for HiiDefaultValue, default is ''
# @param VpdOffset: Input value for VpdOffset, default is ''
# @param DefaultValue: Input value for DefaultValue, default is ''
#
# @var SkuIdName: To store value for SkuIdName
# @var SkuId: To store value for SkuId
# @var VariableName: To store value for VariableName
# @var VariableGuid: To store value for VariableGuid
# @var VariableOffset: To store value for VariableOffset
# @var HiiDefaultValue: To store value for HiiDefaultValue
# @var VpdOffset: To store value for VpdOffset
# @var DefaultValue: To store value for DefaultValue
#
class SkuInfoClass(object):
def __init__(self, SkuIdName = '', SkuId = '', VariableName = '', VariableGuid = '', VariableOffset = '',
HiiDefaultValue = '', VpdOffset = '', DefaultValue = '', VariableGuidValue = '', VariableAttribute = '', DefaultStore = None):
self.SkuIdName = SkuIdName
self.SkuId = SkuId
#
# Used by Hii
#
if DefaultStore is None:
DefaultStore = {}
self.VariableName = VariableName
self.VariableGuid = VariableGuid
self.VariableGuidValue = VariableGuidValue
self.VariableOffset = VariableOffset
self.HiiDefaultValue = HiiDefaultValue
self.VariableAttribute = VariableAttribute
self.DefaultStoreDict = DefaultStore
#
# Used by Vpd
#
self.VpdOffset = VpdOffset
#
# Used by Default
#
self.DefaultValue = DefaultValue
## Convert the class to a string
#
# Convert each member of the class to string
    #  Organize into a single-line format string
#
# @retval Rtn Formatted String
#
def __str__(self):
Rtn = 'SkuId = ' + str(self.SkuId) + "," + \
'SkuIdName = ' + str(self.SkuIdName) + "," + \
'VariableName = ' + str(self.VariableName) + "," + \
'VariableGuid = ' + str(self.VariableGuid) + "," + \
'VariableOffset = ' + str(self.VariableOffset) + "," + \
'HiiDefaultValue = ' + str(self.HiiDefaultValue) + "," + \
'VpdOffset = ' + str(self.VpdOffset) + "," + \
'DefaultValue = ' + str(self.DefaultValue) + ","
return Rtn
def __deepcopy__(self,memo):
new_sku = SkuInfoClass()
new_sku.SkuIdName = self.SkuIdName
new_sku.SkuId = self.SkuId
new_sku.VariableName = self.VariableName
new_sku.VariableGuid = self.VariableGuid
new_sku.VariableGuidValue = self.VariableGuidValue
new_sku.VariableOffset = self.VariableOffset
new_sku.HiiDefaultValue = self.HiiDefaultValue
new_sku.VariableAttribute = self.VariableAttribute
new_sku.DefaultStoreDict = {key:value for key,value in self.DefaultStoreDict.items()}
new_sku.VpdOffset = self.VpdOffset
new_sku.DefaultValue = self.DefaultValue
return new_sku
| bsd-2-clause | 895,875,367,268,498,000 | 41.721649 | 143 | 0.620844 | false |
juliancantillo/royal-films | royalfilms/cinemas/migrations/0011_auto_20160311_0105.py | 1 | 1251 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-11 06:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cinemas', '0010_cinema_city'),
]
operations = [
migrations.RemoveField(
model_name='show',
name='auditorium',
),
migrations.RemoveField(
model_name='showtimes',
name='show',
),
migrations.AddField(
model_name='show',
name='showtime',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cinemas.Showtimes'),
preserve_default=False,
),
migrations.AddField(
model_name='showtimes',
name='auditorium',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cinemas.Auditorium'),
),
migrations.AlterField(
model_name='function',
name='showtimes',
field=models.ManyToManyField(related_name='functions_auditoriums', through='cinemas.Showtimes', to='cinemas.Auditorium'),
),
]
| mit | 1,754,208,661,855,668,500 | 30.275 | 133 | 0.590727 | false |
boegel/easybuild-framework | test/framework/include.py | 1 | 12673 | # #
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for eb command line options.
@author: Kenneth Hoste (Ghent University)
"""
import os
import sys
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered
from unittest import TextTestRunner
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, write_file
from easybuild.tools.include import include_easyblocks, include_module_naming_schemes, include_toolchains
from easybuild.tools.include import is_software_specific_easyblock
def up(path, cnt):
"""Return path N times up."""
if cnt > 0:
path = up(os.path.dirname(path), cnt - 1)
return path
class IncludeTest(EnhancedTestCase):
    """Testcases for including extra easyblocks, module naming schemes and toolchains."""
logfile = None
def test_include_easyblocks(self):
"""Test include_easyblocks()."""
test_easyblocks = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox', 'easybuild', 'easyblocks')
# put a couple of custom easyblocks in place, to test
myeasyblocks = os.path.join(self.test_prefix, 'myeasyblocks')
mkdir(os.path.join(myeasyblocks, 'generic'), parents=True)
# include __init__.py files that should be ignored, and shouldn't cause trouble (bug #1697)
write_file(os.path.join(myeasyblocks, '__init__.py'), "# dummy init, should not get included")
write_file(os.path.join(myeasyblocks, 'generic', '__init__.py'), "# dummy init, should not get included")
myfoo_easyblock_txt = '\n'.join([
"from easybuild.easyblocks.generic.configuremake import ConfigureMake",
"class EB_Foo(ConfigureMake):",
" pass",
])
write_file(os.path.join(myeasyblocks, 'myfoo.py'), myfoo_easyblock_txt)
mybar_easyblock_txt = '\n'.join([
"from easybuild.framework.easyblock import EasyBlock",
"class Bar(EasyBlock):",
" pass",
])
write_file(os.path.join(myeasyblocks, 'generic', 'mybar.py'), mybar_easyblock_txt)
# second myfoo easyblock, should get ignored...
myfoo_bis = os.path.join(self.test_prefix, 'myfoo.py')
write_file(myfoo_bis, '')
# hijack $HOME to test expanding ~ in locations passed to include_easyblocks
os.environ['HOME'] = myeasyblocks
# expand set of known easyblocks with our custom ones;
# myfoo easyblock is included twice, first path should have preference
glob_paths = [os.path.join('~', '*'), os.path.join(myeasyblocks, '*/*.py'), myfoo_bis]
included_easyblocks_path = include_easyblocks(self.test_prefix, glob_paths)
expected_paths = ['__init__.py', 'easyblocks/__init__.py', 'easyblocks/myfoo.py',
'easyblocks/generic/__init__.py', 'easyblocks/generic/mybar.py']
for filepath in expected_paths:
fullpath = os.path.join(included_easyblocks_path, 'easybuild', filepath)
self.assertTrue(os.path.exists(fullpath), "%s exists" % fullpath)
# path to included easyblocks should be prepended to Python search path
self.assertEqual(sys.path[0], included_easyblocks_path)
# importing custom easyblocks should work
import easybuild.easyblocks.myfoo
myfoo_pyc_path = easybuild.easyblocks.myfoo.__file__
myfoo_real_py_path = os.path.realpath(os.path.join(os.path.dirname(myfoo_pyc_path), 'myfoo.py'))
self.assertTrue(os.path.samefile(up(myfoo_real_py_path, 1), myeasyblocks))
del sys.modules['easybuild.easyblocks.myfoo']
import easybuild.easyblocks.generic.mybar
mybar_pyc_path = easybuild.easyblocks.generic.mybar.__file__
mybar_real_py_path = os.path.realpath(os.path.join(os.path.dirname(mybar_pyc_path), 'mybar.py'))
self.assertTrue(os.path.samefile(up(mybar_real_py_path, 2), myeasyblocks))
del sys.modules['easybuild.easyblocks.generic.mybar']
# existing (test) easyblocks are unaffected
import easybuild.easyblocks.foofoo
foofoo_path = os.path.dirname(os.path.dirname(easybuild.easyblocks.foofoo.__file__))
self.assertTrue(os.path.samefile(foofoo_path, test_easyblocks))
del sys.modules['easybuild.easyblocks.foofoo']
def test_include_easyblocks_priority(self):
"""Test whether easyblocks included via include_easyblocks() get priority over others."""
test_easyblocks = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sandbox', 'easybuild', 'easyblocks')
# make sure that test 'foo' easyblock is there
import easybuild.easyblocks.foo
foo_path = os.path.dirname(os.path.dirname(easybuild.easyblocks.foo.__file__))
self.assertTrue(os.path.samefile(foo_path, test_easyblocks))
# inject custom 'foo' easyblocks
myeasyblocks = os.path.join(self.test_prefix, 'myeasyblocks')
mkdir(myeasyblocks)
# include __init__.py file that should be ignored, and shouldn't cause trouble (bug #1697)
write_file(os.path.join(myeasyblocks, '__init__.py'), "# dummy init, should not get included")
# 'undo' import of foo easyblock
del sys.modules['easybuild.easyblocks.foo']
foo_easyblock_txt = '\n'.join([
"from easybuild.framework.easyblock import EasyBlock",
"class EB_Foo(EasyBlock):",
" pass",
])
write_file(os.path.join(myeasyblocks, 'foo.py'), foo_easyblock_txt)
# check that the sandboxed easyblock is imported before include_easyblocks is run
foo_pyc_path = easybuild.easyblocks.foo.__file__
foo_real_py_path = os.path.realpath(os.path.join(os.path.dirname(foo_pyc_path), 'foo.py'))
self.assertTrue(os.path.samefile(os.path.dirname(os.path.dirname(foo_pyc_path)), test_easyblocks))
self.assertFalse(os.path.samefile(foo_real_py_path, os.path.join(myeasyblocks, 'foo.py')))
include_easyblocks(self.test_prefix, [os.path.join(myeasyblocks, 'foo.py')])
# check that the included easyblock is imported after include_easyblocks is run
foo_pyc_path = easybuild.easyblocks.foo.__file__
foo_real_py_path = os.path.realpath(os.path.join(os.path.dirname(foo_pyc_path), 'foo.py'))
self.assertFalse(os.path.samefile(os.path.dirname(os.path.dirname(foo_pyc_path)), test_easyblocks))
self.assertTrue(os.path.samefile(foo_real_py_path, os.path.join(myeasyblocks, 'foo.py')))
# check that the included easyblock is not loaded
self.assertFalse('easybuild.easyblocks.foo' in sys.modules)
def test_include_mns(self):
"""Test include_module_naming_schemes()."""
my_mns = os.path.join(self.test_prefix, 'my_mns')
mkdir(my_mns)
# include __init__.py file that should be ignored, and shouldn't cause trouble (bug #1697)
write_file(os.path.join(my_mns, '__init__.py'), "# dummy init, should not get included")
my_mns_txt = '\n'.join([
"from easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme",
"class MyMNS(ModuleNamingScheme):",
" pass",
])
write_file(os.path.join(my_mns, 'my_mns.py'), my_mns_txt)
my_mns_bis = os.path.join(self.test_prefix, 'my_mns.py')
write_file(my_mns_bis, '')
# include custom MNS
included_mns_path = include_module_naming_schemes(self.test_prefix, [os.path.join(my_mns, '*.py'), my_mns_bis])
expected_paths = ['__init__.py', 'tools/__init__.py', 'tools/module_naming_scheme/__init__.py',
'tools/module_naming_scheme/my_mns.py']
for filepath in expected_paths:
fullpath = os.path.join(included_mns_path, 'easybuild', filepath)
self.assertTrue(os.path.exists(fullpath), "%s exists" % fullpath)
# path to included MNSs should be prepended to Python search path
self.assertEqual(sys.path[0], included_mns_path)
# importing custom MNS should work
import easybuild.tools.module_naming_scheme.my_mns
my_mns_pyc_path = easybuild.tools.module_naming_scheme.my_mns.__file__
my_mns_real_py_path = os.path.realpath(os.path.join(os.path.dirname(my_mns_pyc_path), 'my_mns.py'))
self.assertTrue(os.path.samefile(up(my_mns_real_py_path, 1), my_mns))
def test_include_toolchains(self):
"""Test include_toolchains()."""
my_toolchains = os.path.join(self.test_prefix, 'my_toolchains')
mkdir(my_toolchains)
# include __init__.py file that should be ignored, and shouldn't cause trouble (bug #1697)
write_file(os.path.join(my_toolchains, '__init__.py'), "# dummy init, should not get included")
for subdir in ['compiler', 'fft', 'linalg', 'mpi']:
mkdir(os.path.join(my_toolchains, subdir))
my_tc_txt = '\n'.join([
"from easybuild.toolchains.compiler.my_compiler import MyCompiler",
"class MyTc(MyCompiler):",
" pass",
])
write_file(os.path.join(my_toolchains, 'my_tc.py'), my_tc_txt)
my_compiler_txt = '\n'.join([
"from easybuild.tools.toolchain.compiler import Compiler",
"class MyCompiler(Compiler):",
" pass",
])
write_file(os.path.join(my_toolchains, 'compiler', 'my_compiler.py'), my_compiler_txt)
my_tc_bis = os.path.join(self.test_prefix, 'my_tc.py')
write_file(my_tc_bis, '')
# include custom toolchains
glob_paths = [os.path.join(my_toolchains, '*.py'), os.path.join(my_toolchains, '*', '*.py'), my_tc_bis]
included_tcs_path = include_toolchains(self.test_prefix, glob_paths)
expected_paths = ['__init__.py', 'toolchains/__init__.py', 'toolchains/compiler/__init__.py',
'toolchains/my_tc.py', 'toolchains/compiler/my_compiler.py']
for filepath in expected_paths:
fullpath = os.path.join(included_tcs_path, 'easybuild', filepath)
self.assertTrue(os.path.exists(fullpath), "%s exists" % fullpath)
        # path to included toolchains should be prepended to Python search path
self.assertEqual(sys.path[0], included_tcs_path)
        # importing custom toolchain should work
import easybuild.toolchains.my_tc
my_tc_pyc_path = easybuild.toolchains.my_tc.__file__
my_tc_real_py_path = os.path.realpath(os.path.join(os.path.dirname(my_tc_pyc_path), 'my_tc.py'))
self.assertTrue(os.path.samefile(up(my_tc_real_py_path, 1), my_toolchains))
def test_is_software_specific_easyblock(self):
"""Test is_software_specific_easyblock function."""
self.assertErrorRegex(EasyBuildError, "No such file", is_software_specific_easyblock, '/no/such/easyblock.py')
testdir = os.path.dirname(os.path.abspath(__file__))
test_easyblocks = os.path.join(testdir, 'sandbox', 'easybuild', 'easyblocks')
self.assertTrue(is_software_specific_easyblock(os.path.join(test_easyblocks, 'g', 'gcc.py')))
self.assertTrue(is_software_specific_easyblock(os.path.join(test_easyblocks, 't', 'toy.py')))
self.assertFalse(is_software_specific_easyblock(os.path.join(test_easyblocks, 'generic', 'configuremake.py')))
self.assertFalse(is_software_specific_easyblock(os.path.join(test_easyblocks, 'generic', 'toolchain.py')))
def suite():
""" returns all the testcases in this module """
return TestLoaderFiltered().loadTestsFromTestCase(IncludeTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
| gpl-2.0 | 2,562,027,152,621,121 | 46.287313 | 120 | 0.657145 | false |
feist/pcs | pcs/lib/booth/test/test_resource.py | 1 | 7364 | from unittest import mock, TestCase
from lxml import etree
import pcs.lib.booth.resource as booth_resource
def fixture_resources_with_booth(booth_config_file_path):
return etree.fromstring('''
<resources>
<primitive type="booth-site">
<instance_attributes>
<nvpair name="config" value="{0}"/>
</instance_attributes>
</primitive>
</resources>
'''.format(booth_config_file_path))
def fixture_booth_element(_id, booth_config_file_path):
return etree.fromstring('''
<primitive id="{0}" type="booth-site">
<instance_attributes>
<nvpair name="config" value="{1}"/>
</instance_attributes>
</primitive>
'''.format(_id, booth_config_file_path))
def fixture_ip_element(_id, ip=""):
return etree.fromstring('''
<primitive id="{0}" type="IPaddr2">
<instance_attributes id="{0}-ia">
<nvpair
id="booth-booth-{0}-ia-ip"
name="ip"
value="{1}"
/>
</instance_attributes>
</primitive>
'''.format(_id, ip))
class CreateResourceIdTest(TestCase):
@mock.patch("pcs.lib.booth.resource.find_unique_id")
    def test_return_new_unique_id(self, mock_find_unique_id):
resources_section = etree.fromstring('''<resources/>''')
mock_find_unique_id.side_effect = (
lambda resources_section, _id: "{0}-n".format(_id)
)
self.assertEqual(
"booth-some-name-ip-n",
booth_resource.create_resource_id(
resources_section, "some-name", "ip"
)
)
class FindBoothResourceElementsTest(TestCase):
def test_returns_empty_list_when_no_matching_booth_element(self):
self.assertEqual([], booth_resource.find_for_config(
fixture_resources_with_booth("/ANOTHER/PATH/TO/CONF"),
"/PATH/TO/CONF"
))
def test_returns_all_found_resource_elements(self):
resources = etree.fromstring('<resources/>')
first = fixture_booth_element("first", "/PATH/TO/CONF")
second = fixture_booth_element("second", "/ANOTHER/PATH/TO/CONF")
third = fixture_booth_element("third", "/PATH/TO/CONF")
for element in [first, second, third]:
resources.append(element)
self.assertEqual(
[first, third],
booth_resource.find_for_config(
resources,
"/PATH/TO/CONF"
)
)
class RemoveFromClusterTest(TestCase):
@staticmethod
def call(element_list):
mock_resource_remove = mock.Mock()
booth_resource.get_remover(mock_resource_remove)(element_list)
return mock_resource_remove
@staticmethod
def find_booth_resources(tree):
return tree.xpath('.//primitive[@type="booth-site"]')
def test_remove_ip_when_is_only_booth_sibling_in_group(self):
group = etree.fromstring('''
<group>
<primitive id="ip" type="IPaddr2"/>
<primitive id="booth" type="booth-site">
<instance_attributes>
<nvpair name="config" value="/PATH/TO/CONF"/>
</instance_attributes>
</primitive>
</group>
''')
mock_resource_remove = self.call(self.find_booth_resources(group))
self.assertEqual(
mock_resource_remove.mock_calls, [
mock.call('ip'),
mock.call('booth'),
]
)
def test_remove_ip_when_group_is_disabled_1(self):
group = etree.fromstring('''
<group>
<primitive id="ip" type="IPaddr2"/>
<primitive id="booth" type="booth-site">
<instance_attributes>
<nvpair name="config" value="/PATH/TO/CONF"/>
</instance_attributes>
</primitive>
<meta_attributes>
<nvpair name="target-role" value="Stopped"/>
</meta_attributes>
</group>
''')
mock_resource_remove = self.call(self.find_booth_resources(group))
self.assertEqual(
mock_resource_remove.mock_calls, [
mock.call('ip'),
mock.call('booth'),
]
)
def test_remove_ip_when_group_is_disabled_2(self):
group = etree.fromstring('''
<group>
<meta_attributes>
<nvpair name="target-role" value="Stopped"/>
</meta_attributes>
<primitive id="ip" type="IPaddr2"/>
<primitive id="booth" type="booth-site">
<instance_attributes>
<nvpair name="config" value="/PATH/TO/CONF"/>
</instance_attributes>
</primitive>
</group>
''')
mock_resource_remove = self.call(self.find_booth_resources(group))
self.assertEqual(
mock_resource_remove.mock_calls, [
mock.call('ip'),
mock.call('booth'),
]
)
def test_dont_remove_ip_when_group_has_other_resources(self):
group = etree.fromstring('''
<group>
<primitive id="ip" type="IPaddr2"/>
<primitive id="booth" type="booth-site">
<instance_attributes>
<nvpair name="config" value="/PATH/TO/CONF"/>
</instance_attributes>
</primitive>
<primitive id="dummy" type="Dummy"/>
</group>
''')
mock_resource_remove = self.call(self.find_booth_resources(group))
self.assertEqual(
mock_resource_remove.mock_calls, [
mock.call('booth'),
]
)
class FindBoundIpTest(TestCase):
@staticmethod
def fixture_resource_section(ip_element_list):
resources_section = etree.fromstring('<resources/>')
group = etree.SubElement(resources_section, "group")
group.append(fixture_booth_element("booth1", "/PATH/TO/CONF"))
for ip_element in ip_element_list:
group.append(ip_element)
return resources_section
def test_returns_none_when_no_ip(self):
self.assertEqual(
[],
booth_resource.find_bound_ip(
self.fixture_resource_section([]),
"/PATH/TO/CONF",
)
)
def test_returns_ip_when_correctly_found(self):
self.assertEqual(
["192.168.122.31"],
booth_resource.find_bound_ip(
self.fixture_resource_section([
fixture_ip_element("ip1", "192.168.122.31"),
]),
"/PATH/TO/CONF",
)
)
def test_returns_none_when_more_ip(self):
self.assertEqual(
["192.168.122.31", "192.168.122.32"],
booth_resource.find_bound_ip(
self.fixture_resource_section([
fixture_ip_element("ip1", "192.168.122.31"),
fixture_ip_element("ip2", "192.168.122.32"),
]),
"/PATH/TO/CONF",
)
)
| gpl-2.0 | 6,868,946,752,976,943,000 | 33.092593 | 74 | 0.519283 | false |
marcharper/stationary | stationary/utils/math_helpers.py | 1 | 5555 | import numpy
from numpy import log
try:
from scipy.misc import logsumexp
except ImportError:
from numpy import logaddexp
logsumexp = logaddexp.reduce
def slice_dictionary(d, N, slice_index=0, slice_value=0):
"""
Take a three dimensional slice from a four dimensional
dictionary.
"""
slice_dict = dict()
for state in simplex_generator(N, 2):
new_state = list(state)
new_state.insert(slice_index, slice_value)
slice_dict[state] = d[tuple(new_state)]
return slice_dict
def squared_error(d1, d2):
"""
    Compute the root of the summed squared error (i.e. the Euclidean distance) between two vectors.
"""
s = 0.
for k in range(len(d1)):
s += (d1[k] - d2[k])**2
return numpy.sqrt(s)
def squared_error_dict(d1, d2):
"""
    Compute the root of the summed squared error (i.e. the Euclidean distance) between two vectors, stored as dictionaries.
"""
s = 0.
for k in d1.keys():
s += (d1[k] - d2[k])**2
return numpy.sqrt(s)
def multiply_vectors(a, b):
c = []
for i in range(len(a)):
c.append(a[i]*b[i])
return c
def dot_product(a, b):
c = 0
for i in range(len(a)):
c += a[i] * b[i]
return c
def normalize(x):
s = float(sum(x))
for j in range(len(x)):
x[j] /= s
return x
def normalize_dictionary(x):
s = float(sum(x.values()))
for k in x.keys():
x[k] /= s
return x
def inc_factorial(x, n):
p = 1.
for i in range(0, n):
p *= (x + i)
return p
def factorial(i):
p = 1.
for j in range(2, i+1):
p *= j
return p
def log_inc_factorial(x,n):
p = 1.
for i in range(0, n):
p += log(x + i)
return p
def log_factorial(i):
p = 1.
for j in range(2, i+1):
p += log(j)
return p
def simplex_generator(N, d=2):
"""
    Generates a discretization of the simplex.
    Parameters
    ----------
    N: int
        The number of subdivisions in each dimension
    d: int, 2
        The dimension of the simplex (the number of population types is d+1)
Yields
------
    (d+1)-tuples of non-negative integers summing to N. The total number of yielded
    tuples is binom{N + d}{d}, the d-dimensional simplicial polytopic number with
    N+1 points per edge (see https://en.wikipedia.org/wiki/Figurate_number )
"""
if d == 1:
for i in range(N+1):
yield (i, N - i)
if d > 1:
for j in range(N+1):
for s in simplex_generator(N - j, d - 1):
t = [j]
t.extend(s)
yield tuple(t)
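# Illustrative check (added note, not part of the original module): simplex_generator(2, 2)
# yields the six 3-tuples (0, 0, 2), (0, 1, 1), (0, 2, 0), (1, 0, 1), (1, 1, 0) and
# (2, 0, 0), i.e. binom(N + d, d) = binom(4, 2) = 6 grid points on the simplex.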
def one_step_generator(d):
"""
Generates the arrays needed to construct neighboring states one step away
from a state in the dimension d simplex.
"""
if d == 1:
yield [1, -1]
yield [-1, 1]
return
for plus_index in range(d + 1):
for minus_index in range(d + 1):
if minus_index == plus_index:
continue
step = [0] * (d + 1)
step[plus_index] = 1
step[minus_index] = -1
yield step
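# Illustrative check (added note, not part of the original module): one_step_generator(2)
# yields the six step vectors [1, -1, 0], [1, 0, -1], [-1, 1, 0], [0, 1, -1],
# [-1, 0, 1] and [0, -1, 1], each moving one individual between two of the three types.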
def one_step_indicies_generator(d):
"""
Generates the indices that form all the neighboring states, by adding +1 in
one index and -1 in another.
"""
if d == 1:
yield [0, 1]
yield [1, 0]
return
for plus_index in range(d + 1):
for minus_index in range(d + 1):
if minus_index == plus_index:
continue
yield (plus_index, minus_index)
def kl_divergence(p, q):
"""
    Computes the KL-divergence or relative entropy of two input distributions.
Parameters
----------
p, q: lists
The probability distributions to compute the KL-divergence for
Returns
-------
float, the KL-divergence of p and q
"""
s = 0.
for i in range(len(p)):
if p[i] == 0:
continue
if q[i] == 0:
return float('nan')
try:
s += p[i] * log(p[i])
except (ValueError, ZeroDivisionError):
continue
try:
s -= p[i] * log(q[i])
except (ValueError, ZeroDivisionError):
continue
return s
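# Worked example (added for illustration, not part of the original module):
# kl_divergence([0.5, 0.5], [0.25, 0.75])
#   = 0.5 * log(0.5 / 0.25) + 0.5 * log(0.5 / 0.75)
#   = 0.5 * log(2) + 0.5 * log(2 / 3) ~= 0.144 nats (natural log)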
def kl_divergence_dict(p, q):
"""
Computes the KL-divergence of distributions given as dictionaries.
"""
s = 0.
p_list = []
q_list = []
for i in p.keys():
p_list.append(p[i])
q_list.append(q[i])
return kl_divergence(p_list, q_list)
def q_divergence(q):
"""
Returns the divergence function corresponding to the parameter value q. For
q == 0 this function is one-half the squared Euclidean distance. For q == 1
this function returns the KL-divergence.
"""
if q == 0:
def d(x, y):
return 0.5 * numpy.dot((x - y), (x - y))
return d
if q == 1:
return kl_divergence
if q == 2:
def d(x, y):
s = 0.
for i in range(len(x)):
s += log(x[i] / y[i]) + 1 - x[i] / y[i]
return -s
return d
q = float(q)
def d(x, y):
s = 0.
for i in range(len(x)):
s += (numpy.power(y[i], 2 - q) - numpy.power(x[i], 2 - q)) / (2 - q)
s -= numpy.power(y[i], 1 - q) * (y[i] - x[i])
s = -s / (1 - q)
return s
return d
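# Illustrative check (added note, not part of the original module): q_divergence(0)
# returns half the squared Euclidean distance, so for x = numpy.array([1., 0.]) and
# y = numpy.array([0., 1.]) it gives 0.5 * (1 + 1) = 1.0, while q_divergence(1) is
# exactly kl_divergence.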
def shannon_entropy(p):
s = 0.
for i in range(len(p)):
try:
s += p[i] * log(p[i])
except ValueError:
continue
return -1. * s
def binary_entropy(p):
return -p * log(p) - (1 - p) * log(1 - p)
| mit | -3,417,794,059,619,520,500 | 20.614786 | 80 | 0.514131 | false |
nbstrauli/influenza_vaccination_project | scripts/pipeline/get_gene_usage_time_progression.py | 1 | 14047 | #!/usr/bin/python
#$ -S /usr/bin/python
#$ -e error
#$ -cwd
#$ -r y
#$ -j y
#$ -l mem_free=2G
#$ -l arch=linux-x64
#$ -l netapp=1G,scratch=180G
#$ -l h_rt=336:00:00
import sys
import os
import itertools
def make_dic_of_all_gene_names(gene_name_input_filepaths, evalue_input_filepaths, evalue_cutoff, drop_allele_info):
"""This script goes through all of the gene input files (provided by 'gene_name_input_filepaths') as well as the evalue input files (provided by 'evalue_input_filepaths') for all time-points. It does this to create a dictionary of all the gene names that exist (for this gene class), which have a corresponding evalue that is lower than 'evalue_cutoff'. Returns this dictionary, where each index is a gene name, and its definition is the number of counts that this gene name was found (and satisfied the evalue cutoff)."""
"""evalues can only be a float or 'N/A'"""
gene_names_dic = {}
for i, j, in itertools.izip(gene_name_input_filepaths, evalue_input_filepaths):
gene_name_filein = open(i, "r")
evalue_filein = open(j, "r")
for k, l in itertools.izip(gene_name_filein, evalue_filein):
evalue = l.split('\t')[1][:-1]
if evalue == 'N/A':
continue
elif float(evalue) > evalue_cutoff:
continue
elif float(evalue) <= evalue_cutoff:
gene_name = k.split('\t')[1][:-1]
if gene_name == 'N/A':
                    print 'error: found a defined evalue with an undefined gene name'
sys.stdout.flush()
return
if drop_allele_info:
#remove the allele information from the gene name
gene_name = gene_name.split('*')[0]
try:
gene_names_dic[gene_name] += 1
except KeyError:
gene_names_dic[gene_name] = 1
gene_name_filein.close()
evalue_filein.close()
print '\tgot gene names for:', i
sys.stdout.flush()
return gene_names_dic
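# Assumed input format (added note, inferred from the parsing above, for illustration
# only): each gene-name file line is tab-separated with the gene name in column two,
# e.g. "read_id\tIGHV3-23*01", and each evalue file line has the evalue in column two,
# e.g. "read_id\t1e-30"; an 'N/A' evalue causes the read to be skipped.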
def make_dic_of_all_cdrs(cdr3_seqs_input_filepaths):
cdr3_seqs_dic = {}
for i in cdr3_seqs_input_filepaths:
filein = open(i, "r")
for j in filein:
cdr3_seq = j[:-1].split('\t')[1]
try:
cdr3_seqs_dic[cdr3_seq] += 1
except KeyError:
cdr3_seqs_dic[cdr3_seq] = 1
filein.close()
return cdr3_seqs_dic
def get_gene_usage_foreach_tpoint(gene_name_input_filepaths, evalue_input_filepaths, gene_names_dic, evalue_cutoff, drop_allele_info):
"""This script is very similar to 'make_dic_of_all_gene_names', except instead of getting a gene name dictionary for all time-points, its make an individual dictionary for each time-point. It uses the dictionary for all time-points ('gene_names_dic') to prime the dics for each time-point (which are stored in memory as a list of dics (in chronological order)). It then loops through the gene name and evalue files (provided by 'gene_name_input_filepaths' and 'evalue_input_filepaths', respectively) to get all the gene names from alignments that satisfy the evalue cutoff, for each time-point. Returns a list of dictionaries, where each dic corresponds to a time-point, and the last element in the dictionary-list is all time-points combined."""
    all_tpoints_gene_names_dics = [] #list of dictionaries for each time-point
for i, j in itertools.izip(gene_name_input_filepaths, evalue_input_filepaths):
gene_name_filein = open(i, "r")
evalue_filein = open(j, "r")
tpoint_gene_names_dic = {}
        #prime the gene name dictionary for this time-point
for l in gene_names_dic:
tpoint_gene_names_dic[l] = 0
for l, m in itertools.izip(gene_name_filein, evalue_filein):
evalue = m.split('\t')[1][:-1]
if evalue == 'N/A':
continue
elif float(evalue) > evalue_cutoff:
continue
elif float(evalue) <= evalue_cutoff:
gene_name = l.split('\t')[1][:-1]
if drop_allele_info:
#remove allele information
gene_name = gene_name.split('*')[0]
tpoint_gene_names_dic[gene_name] += 1
gene_name_filein.close()
evalue_filein.close()
all_tpoints_gene_names_dics.append(tpoint_gene_names_dic)
return all_tpoints_gene_names_dics
def get_cdr3_usage_foreach_tpoint(cdr3_seqs_input_filepaths, cdr3_seqs_dic):
    all_tpoints_cdr3_seqs_dics = [] #list of dictionaries for each time-point
for i in cdr3_seqs_input_filepaths:
filein = open(i, "r")
tpoint_cdr3_seqs_dic = {}
        #prime the dictionary for this time-point
for j in cdr3_seqs_dic:
tpoint_cdr3_seqs_dic[j] = 0
for j in filein:
cdr3_seq = j[:-1].split('\t')[1]
tpoint_cdr3_seqs_dic[cdr3_seq] += 1
filein.close()
all_tpoints_cdr3_seqs_dics.append(tpoint_cdr3_seqs_dic)
return all_tpoints_cdr3_seqs_dics
def get_gene_lengths(ref_seq_dirpath, gene_class, drop_allele_info):
gene_class_dic = {'vgene_heavy':'IGHV', 'dgene_heavy':'IGHD', 'jgene_heavy':'IGHJ', 'vgene_lambda':'IGLV', 'jgene_lambda':'IGLJ', 'vgene_kappa':'IGKV', 'jgene_kappa':'IGKJ'}
ref_seq_filepath = '%s%s.fasta' % (ref_seq_dirpath, gene_class_dic[gene_class])
filein = open(ref_seq_filepath, "r")
gene_lens_dic = {}
for i in filein:
if i[0] == '>':
gene_name = i[1:-1]
if drop_allele_info:
gene_name = gene_name.split('*')[0]
else:
length = float(len(i[:-1]))
try:
gene_lens_dic[gene_name].append(length)
except KeyError:
gene_lens_dic[gene_name] = [length]
new_gene_lens_dic = {}
for i in gene_lens_dic:
if drop_allele_info:
new_gene_lens_dic[i] = round(sum(gene_lens_dic[i]) / len(gene_lens_dic[i]), 0)
else:
new_gene_lens_dic[i] = gene_lens_dic[i][0]
return new_gene_lens_dic
def get_total_mapped_reads(ig_expression_filepath):
filein = open(ig_expression_filepath, "r")
filein.readline()
total_mapped_reads = []
for i in filein:
total_mapped_reads.append(float(i.split('\t')[1]))
filein.close()
return total_mapped_reads
def normalize_counts(all_tpoints_gene_names_dics, gene_lens_dic, total_mapped_reads, scaling_factor):
#for each time-point
for i in xrange(len(total_mapped_reads)):
mapped_reads = total_mapped_reads[i]
#for each gene found in the data
for j in all_tpoints_gene_names_dics[i]:
#if this is CDR3 seq data
if gene_lens_dic == 'cdr3':
expression_level = (all_tpoints_gene_names_dics[i][j] / mapped_reads) * scaling_factor
else:
gene_length = gene_lens_dic[j]
expression_level = (all_tpoints_gene_names_dics[i][j] / (mapped_reads * gene_length)) * scaling_factor
all_tpoints_gene_names_dics[i][j] = expression_level
return all_tpoints_gene_names_dics
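# Worked example (added for illustration): with 50 reads assigned to a gene of length
# 300, 1e6 total mapped reads and scaling_factor = 1e9, the normalized expression is
# 50 / (1e6 * 300) * 1e9 ~= 166.7; for CDR3 data ('cdr3') the gene-length term is dropped.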
def write_output(all_tpoints_gene_names_dics, tpoint_ids, gene_usage_output_dirpath, gene_class):
    #this will be a list of lists, where each element corresponds to a gene,
#within a gene entry, the 1st element is the range of that gene's
#expression trajectory, followed by the gene name, then the actual
#expression trajectory
gene_expr_trajs = []
for i in all_tpoints_gene_names_dics[0]:
gene_name = i
expr_traj = []
for j in all_tpoints_gene_names_dics:
expr_traj.append(j[gene_name])
range = max(expr_traj) - min(expr_traj)
gene_expr_trajs.append([range, gene_name, expr_traj])
#sort according to the range of the expression trajectories
gene_expr_trajs = sorted(gene_expr_trajs)
#if this is CDR3 seq data
if gene_class == 'cdr3':
fileout = open(gene_usage_output_dirpath, "w")
else:
output_filepath = gene_usage_output_dirpath + gene_class
fileout = open(output_filepath, "w")
fileout.write('\t' + '\t'.join([str(i) for i in tpoint_ids]) + '\n')
for i in gene_expr_trajs:
fileout.write(i[1] + '\t' + '\t'.join([str(j) for j in i[2]]) + '\n')
fileout.close()
return
def run(gene_names_master_dirpath, evalues_master_dirpath, gene_usage_output_dirpath, gene_class, evalue_cutoff, drop_allele_info, ref_seq_dirpath, ig_expression_filepath, scaling_factor):
"""This script runs the pipeline. The program gets the frequency of each reference gene (of a given gene class) for each snyderome time-point. It writes these results to 'gene_usage_output_dirpath'."""
if gene_names_master_dirpath[-1] != '/':
gene_names_master_dirpath += '/'
if evalues_master_dirpath[-1] != '/':
evalues_master_dirpath += '/'
if gene_usage_output_dirpath[-1] != '/':
gene_usage_output_dirpath += '/'
if not os.path.exists(gene_usage_output_dirpath):
os.makedirs(gene_usage_output_dirpath)
if ref_seq_dirpath[-1] != '/':
ref_seq_dirpath += '/'
#get input gene name and evalue filepaths, and make sure
#they are in numerical order according to the time points
tpoint_ids = []
gene_name_input_filepaths = []
for i in os.listdir(gene_names_master_dirpath):
if i[0] == '.':
continue
tpoint_ids.append(int(i))
gene_name_input_filepaths.append([int(i), gene_names_master_dirpath + i + '/' + gene_class + '_name'])
gene_name_input_filepaths = [i[1] for i in sorted(gene_name_input_filepaths)]
tpoint_ids = sorted(tpoint_ids)
evalue_input_filepaths = []
for i in os.listdir(evalues_master_dirpath):
if i[0]== '.':
continue
evalue_input_filepaths.append([int(i), evalues_master_dirpath + i + '/' + gene_class + '_evalue'])
evalue_input_filepaths = [i[1] for i in sorted(evalue_input_filepaths)]
print 'getting dic of all gene names'
sys.stdout.flush()
gene_names_dic = make_dic_of_all_gene_names(gene_name_input_filepaths, evalue_input_filepaths, evalue_cutoff, drop_allele_info)
print 'getting dic for individual time-points'
sys.stdout.flush()
all_tpoints_gene_names_dics = get_gene_usage_foreach_tpoint(gene_name_input_filepaths, evalue_input_filepaths, gene_names_dic, evalue_cutoff, drop_allele_info)
print "normalizing counts"
gene_lens_dic = get_gene_lengths(ref_seq_dirpath, gene_class, drop_allele_info)
total_mapped_reads = get_total_mapped_reads(ig_expression_filepath)
all_tpoints_gene_names_dics = normalize_counts(all_tpoints_gene_names_dics, gene_lens_dic, total_mapped_reads, scaling_factor)
print "writing output"
write_output(all_tpoints_gene_names_dics, tpoint_ids, gene_usage_output_dirpath, gene_class)
return
def run_array(gene_names_master_dirpath, evalues_master_dirpath, gene_usage_output_dirpath, evalue_cutoff, drop_allele_info, ref_seq_dirpath, ig_expression_filepath, scaling_factor, gene_classes):
"""NOTE: This module should be updated to write the order of gene names in the gene_usage_output_files as a seperate output file in its own directory. This was done for 'get_gene_usage_time_progression_no_normalize.py', but didn't get a chance to do it for this script. Mostly because we would have to go back to the influenza pipeline and update that as well. Should do this update if we ever need this module again."""
"""UPDATE: We added the feature to write the gene name order files, but have not updated the pipeline. Must do this soon!"""
"""UPDATE: We added it to the pipeline."""
if drop_allele_info == 'True':
drop_allele_info = True
elif drop_allele_info == 'False':
drop_allele_info = False
else:
print 'The "drop_allele_info" variable must be either True or False'
print 'It is currently set to:', drop_allele_info
return
sge_task_id = int(os.environ['SGE_TASK_ID'])
gene_class = gene_classes[sge_task_id - 1]
run(gene_names_master_dirpath, evalues_master_dirpath, gene_usage_output_dirpath, gene_class, evalue_cutoff, drop_allele_info, ref_seq_dirpath, ig_expression_filepath, scaling_factor)
return
def run_cdr3(cdr3_seqs_dirpath, cdr3_usage_output_filepath, ig_expression_filepath, scaling_factor):
if cdr3_seqs_dirpath[-1] != '/':
cdr3_seqs_dirpath += '/'
tpoint_ids = []
cdr3_seqs_input_filepaths = []
for i in os.listdir(cdr3_seqs_dirpath):
if i[0] == '.':
continue
tpoint_ids.append(int(i))
cdr3_seqs_input_filepaths.append([int(i), cdr3_seqs_dirpath + i])
cdr3_seqs_input_filepaths = [i[1] for i in sorted(cdr3_seqs_input_filepaths)]
tpoint_ids = sorted(tpoint_ids)
print 'getting dic of all cdr3 seqs'
sys.stdout.flush()
cdr3_seqs_dic = make_dic_of_all_cdrs(cdr3_seqs_input_filepaths)
print 'getting dic for individual time-points'
all_tpoints_cdr3_seqs_dics = get_cdr3_usage_foreach_tpoint(cdr3_seqs_input_filepaths, cdr3_seqs_dic)
print "normalizing counts"
total_mapped_reads = get_total_mapped_reads(ig_expression_filepath)
all_tpoints_cdr3_seqs_dics = normalize_counts(all_tpoints_cdr3_seqs_dics, 'cdr3', total_mapped_reads, scaling_factor)
print "writing output"
write_output(all_tpoints_cdr3_seqs_dics, tpoint_ids, cdr3_usage_output_filepath, 'cdr3')
return
if __name__ == '__main__':
if len(sys.argv[1:]) > 9:
run_array(sys.argv[1], sys.argv[2], sys.argv[3], float(sys.argv[4]), sys.argv[5], sys.argv[6], sys.argv[7], float(sys.argv[8]), sys.argv[9:])
elif len(sys.argv[1:]) == 9:
run(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], float(sys.argv[5]), sys.argv[6], sys.argv[7], sys.argv[8], float(sys.argv[9]))
elif len(sys.argv[1:]) == 4:
run_cdr3(sys.argv[1], sys.argv[2], sys.argv[3], float(sys.argv[4]))
| cc0-1.0 | -8,732,614,653,426,692,000 | 50.643382 | 750 | 0.635865 | false |
yastrov/py-txt2fb2 | txt2fb2lib/bookcreator.py | 1 | 4706 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Convert plain txt file to FB2 (FictionBook) format.
Includes division into paragraphs and embedding of image files.
A very simple finite state machine is used here.
Requires Python 3.4 or higher.
"""
__author__ = 'Yuri Astrov <[email protected]>'
__copyright__ = "Copyright 2014, Txt2FB2"
__credits__ = ["Yuri Astrov", ]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Yuriy Astrov"
__email__ = "[email protected]"
__all__ = ['txt2fb2',]
import re
from enum import Enum, unique
@unique
class STATE(Enum):
MAKING_SECTION = 1
MAKING_TITLE = 2
MAKING_TEXT = 3
MAKING_IMG = 4
INIT = 5
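# Rough summary of the state transitions implemented in txt2fb2() below (comment added
# for illustration): INIT -> MAKING_TITLE when a part or chapter title line is matched,
# MAKING_TITLE -> MAKING_TEXT once the title line ends, and MAKING_TEXT returns to
# MAKING_TITLE whenever a new part or title is detected.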
from txt2fb2lib.titlematcher import testTitle, testNewPart
testTitle = re.compile(testTitle, re.UNICODE)
testNewPart = re.compile(testNewPart, re.UNICODE)
from txt2fb2lib.fb2creator import FB2Creator
from txt2fb2lib.booktokenize import tokenize, Data
def stack_to_str(__stack):
while __stack and __stack[0].tag == "SPACE":
__stack.pop(0)
s = ''
if __stack:
s = ''.join( map(lambda x: x[0], __stack) )
__stack.clear()
return s
def txt2fb2(fdiscr, encoding=None,
covername=None, title=None,
annotation=None, dir_name=None,
author_firstname=None, author_lastname=None,
author_middlename=None,
genres=None):
FB2 = FB2Creator()
FB2.make_titleinfo(title=title, cover=covername,
genres=genres)
FB2.set_book_author(lastdname=author_lastname,
firstname=author_firstname,
middlename=author_middlename)
FB2.make_documentinfo()
FB2.set_doc_author(lastdname=author_lastname,
firstname=author_firstname,
middlename=author_middlename)
__state = STATE.INIT
__stack = []
flag_part_previous = 0
flag_point_previous = 0
for token in tokenize(fdiscr.readline, encoding=encoding):
val, tag, pos, line_num, line_v = token
if __state == STATE.INIT:
if testNewPart.match(line_v):
FB2.open_section()
flag_part_previous = 1
__state = STATE.MAKING_TITLE
elif testTitle.match(line_v):
FB2.open_section()
__state = STATE.MAKING_TITLE
else: __stack.append(token)
__stack.append(token)
elif __state == STATE.MAKING_TITLE:
if tag in ['ENDOFLINE',]:
s = stack_to_str(__stack)
FB2.make_title(s)
__state = STATE.MAKING_TEXT
elif tag =='SPACE' and pos == 0:
pass
else:
__stack.append(token)
elif __state == STATE.MAKING_TEXT:
if tag == 'IMGFILEPATH':
if __stack:
s = stack_to_str(__stack)
FB2.make_p(s)
FB2.add_image(val, dir_name=dir_name)
elif pos == 0 and tag == 'ENDOFLINE':#
s = stack_to_str(__stack)
FB2.make_p(s)
FB2.make_emptyline()
elif tag == 'ENDOFLINE':
pass
elif testNewPart.match(line_v):
FB2.make_p(stack_to_str(__stack))
FB2.close_section()
FB2.close_section()
FB2.open_section()
__state = STATE.MAKING_TITLE
__stack.append(token)
flag_part_previous = 1
elif testTitle.match(line_v):
FB2.make_p(stack_to_str(__stack))
if flag_part_previous != 1:
FB2.close_section()
flag_part_previous = 0
FB2.open_section()
__state = STATE.MAKING_TITLE
__stack.append(token)
elif pos == 0 and tag in ["SPACE", 'TIRE', 'CAVYCHKI'] and flag_point_previous:
FB2.make_p(stack_to_str(__stack) )
__state = STATE.MAKING_TEXT
if tag != 'SPACE':
__stack.append(token)
flag_point_previous = 0
else:
if pos == 0 and val[0].islower():
__stack.append( Data(' ', 'SPACE', 0, 0, '') )
if tag == 'ENDOFSENTENS':
flag_point_previous = 1
elif tag != 'SPACE':
flag_point_previous = 0
__stack.append(token)
elif __state == STATE.MAKING_IMG:
FB2.add_image(val)
continue
if __stack:
s = stack_to_str(__stack)
FB2.make_p(s)
return FB2 | mit | -1,994,880,033,976,461,300 | 32.382979 | 91 | 0.514237 | false |
google/python-spanner-orm | spanner_orm/metadata.py | 1 | 4669 | # python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hold information about a Model extracted from the class attributes."""
from typing import Any, Dict, Type, Optional
from spanner_orm import error
from spanner_orm import field
from spanner_orm import foreign_key_relationship
from spanner_orm import index
from spanner_orm import registry
from spanner_orm import relationship
class ModelMetadata(object):
"""Hold information about a Model extracted from the class attributes."""
def __init__(self,
table: Optional[str] = None,
fields: Optional[Dict[str, field.Field]] = None,
relations: Optional[Dict[str, relationship.Relationship]] = None,
foreign_key_relations: Optional[Dict[
str,
foreign_key_relationship.ForeignKeyRelationship]] = None,
indexes: Optional[Dict[str, index.Index]] = None,
interleaved: Optional[str] = None,
model_class: Optional[Type[Any]] = None):
self.columns = []
self.fields = dict(fields or {})
self._finalized = False
self.indexes = dict(indexes or {})
self.interleaved = interleaved
self.model_class = model_class
self.primary_keys = []
self.relations = dict(relations or {})
self.foreign_key_relations = dict(foreign_key_relations or {})
self.table = table or ''
def finalize(self) -> None:
"""Finish generating metadata state.
Some metadata depends on having all configuration data set before it can
be calculated--the primary index, for example, needs all fields to be added
before it can be calculated. This method is called to indicate that all
relevant state has been added and the calculation of the final data should
now happen.
"""
if self._finalized:
raise error.SpannerError('Metadata was already finalized')
sorted_fields = list(sorted(self.fields.values(), key=lambda f: f.position))
if index.Index.PRIMARY_INDEX not in self.indexes:
primary_keys = [f.name for f in sorted_fields if f.primary_key()]
primary_index = index.Index(primary_keys)
primary_index.name = index.Index.PRIMARY_INDEX
self.indexes[index.Index.PRIMARY_INDEX] = primary_index
self.primary_keys = self.indexes[index.Index.PRIMARY_INDEX].columns
self.columns = [f.name for f in sorted_fields]
for _, relation in self.relations.items():
relation.origin = self.model_class
registry.model_registry().register(self.model_class)
self._finalized = True
def add_metadata(self, metadata: 'ModelMetadata') -> None:
self.table = metadata.table or self.table
self.fields.update(metadata.fields)
self.relations.update(metadata.relations)
self.indexes.update(metadata.indexes)
self.interleaved = metadata.interleaved or self.interleaved
def add_field(self, name: str, new_field: field.Field) -> None:
new_field.name = name
new_field.position = len(self.fields)
self.fields[name] = new_field
def add_relation(self, name: str,
new_relation: relationship.Relationship) -> None:
new_relation.name = name
self.relations[name] = new_relation
def add_foreign_key_relation(
self,
name: str,
new_relation: foreign_key_relationship.ForeignKeyRelationship,
) -> None:
new_relation.name = name
self.foreign_key_relations[name] = new_relation
def add_index(self, name: str, new_index: index.Index) -> None:
new_index.name = name
self.indexes[name] = new_index
| apache-2.0 | 2,871,411,075,835,277,300 | 38.235294 | 80 | 0.701649 | false |
del680202/MachineLearning-memo | src/tensorflow/autocoder.py | 1 | 6809 | # View more python learning tutorial on my Youtube and Youku channel!!!
# My tutorial website: https://morvanzhou.github.io/tutorials/
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Visualize decoder setting
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
    # Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
    # Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
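# Shape flow for the model above (comment added for clarity): each 784-pixel MNIST
# image is compressed 784 -> 256 -> 128 by the encoder and reconstructed
# 128 -> 256 -> 784 by the decoder; the training target is the input image itself.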
"""
# Visualize encoder setting
# Parameters
learning_rate = 0.01 # 0.01 this learning rate will be better! Tested
training_epochs = 10
batch_size = 256
display_step = 1
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2
weights = {
'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],)),
'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],)),
'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3],)),
'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4],)),
'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3],)),
'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2],)),
'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1],)),
'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input],)),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
biases['encoder_b3']))
layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
biases['encoder_b4'])
return layer_4
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
biases['decoder_b3']))
layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
biases['decoder_b4']))
return layer_4
"""
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    # tf.initialize_all_variables() no longer valid from
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.initialize_all_variables())
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size) # max(x) = 1, min(x) = 0
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# # Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) #Raw image
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) # encode -> decode image
plt.show()
# encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
# plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
# plt.colorbar()
# plt.show()
| apache-2.0 | -2,241,710,524,108,010,000 | 36.005435 | 93 | 0.622705 | false |
Donkyhotay/MoonPy | zope/server/http/httptask.py | 1 | 7688 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Task
An HTTP task that can execute an HTTP request with the help of the channel and
the server it belongs to.
$Id: httptask.py 41111 2006-01-03 19:02:50Z jim $
"""
import socket
import time
from zope.server.http.http_date import build_http_date
from zope.publisher.interfaces.http import IHeaderOutput
from zope.server.interfaces import ITask
from zope.interface import implements
rename_headers = {
'CONTENT_LENGTH' : 'CONTENT_LENGTH',
'CONTENT_TYPE' : 'CONTENT_TYPE',
'CONNECTION' : 'CONNECTION_TYPE',
}
class HTTPTask(object):
"""An HTTP task accepts a request and writes to a channel.
Subclass this and override the execute() method.
"""
implements(ITask, IHeaderOutput) #, IOutputStream
instream = None
close_on_finish = 1
status = '200'
reason = 'Ok'
wrote_header = 0
accumulated_headers = None
bytes_written = 0
auth_user_name = ''
cgi_env = None
def __init__(self, channel, request_data):
self.channel = channel
self.request_data = request_data
self.response_headers = {
'Server': channel.server.SERVER_IDENT,
}
version = request_data.version
if version not in ('1.0', '1.1'):
# fall back to a version we support.
version = '1.0'
self.version = version
def service(self):
"""See zope.server.interfaces.ITask"""
try:
try:
self.start()
self.channel.server.executeRequest(self)
self.finish()
except socket.error:
self.close_on_finish = 1
if self.channel.adj.log_socket_errors:
raise
finally:
if self.close_on_finish:
self.channel.close_when_done()
def cancel(self):
"""See zope.server.interfaces.ITask"""
self.channel.close_when_done()
def defer(self):
"""See zope.server.interfaces.ITask"""
pass
def setResponseStatus(self, status, reason):
"""See zope.publisher.interfaces.http.IHeaderOutput"""
self.status = status
self.reason = reason
def setResponseHeaders(self, mapping):
"""See zope.publisher.interfaces.http.IHeaderOutput"""
self.response_headers.update(mapping)
def appendResponseHeaders(self, lst):
"""See zope.publisher.interfaces.http.IHeaderOutput"""
accum = self.accumulated_headers
if accum is None:
self.accumulated_headers = accum = []
accum.extend(lst)
def wroteResponseHeader(self):
"""See zope.publisher.interfaces.http.IHeaderOutput"""
return self.wrote_header
def setAuthUserName(self, name):
"""See zope.publisher.interfaces.http.IHeaderOutput"""
self.auth_user_name = name
def prepareResponseHeaders(self):
version = self.version
# Figure out whether the connection should be closed.
connection = self.request_data.headers.get('CONNECTION', '').lower()
close_it = 0
response_headers = self.response_headers
if version == '1.0':
if connection == 'keep-alive':
if not ('Content-Length' in response_headers):
close_it = 1
else:
response_headers['Connection'] = 'Keep-Alive'
else:
close_it = 1
elif version == '1.1':
if connection == 'close':
close_it = 1
elif 'Transfer-Encoding' in response_headers:
if not response_headers['Transfer-Encoding'] == 'chunked':
close_it = 1
elif self.status == '304':
# Replying with headers only.
pass
elif not ('Content-Length' in response_headers):
close_it = 1
else:
# Close if unrecognized HTTP version.
close_it = 1
self.close_on_finish = close_it
if close_it:
self.response_headers['Connection'] = 'close'
def buildResponseHeader(self):
self.prepareResponseHeaders()
first_line = 'HTTP/%s %s %s' % (self.version, self.status, self.reason)
lines = [first_line] + ['%s: %s' % hv
for hv in self.response_headers.items()]
accum = self.accumulated_headers
if accum is not None:
lines.extend(accum)
res = '%s\r\n\r\n' % '\r\n'.join(lines)
return res
def getCGIEnvironment(self):
"""Returns a CGI-like environment."""
env = self.cgi_env
if env is not None:
# Return the cached copy.
return env
request_data = self.request_data
path = request_data.path
channel = self.channel
server = channel.server
while path and path.startswith('/'):
path = path[1:]
env = {}
env['REQUEST_METHOD'] = request_data.command.upper()
env['SERVER_PORT'] = str(server.port)
env['SERVER_NAME'] = server.server_name
env['SERVER_SOFTWARE'] = server.SERVER_IDENT
env['SERVER_PROTOCOL'] = "HTTP/%s" % self.version
env['CHANNEL_CREATION_TIME'] = channel.creation_time
env['SCRIPT_NAME']=''
env['PATH_INFO']='/' + path
query = request_data.query
if query:
env['QUERY_STRING'] = query
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
addr = channel.addr[0]
env['REMOTE_ADDR'] = addr
# If the server has a resolver, try to get the
# remote host from the resolver's cache.
resolver = getattr(server, 'resolver', None)
if resolver is not None:
dns_cache = resolver.cache
if addr in dns_cache:
remote_host = dns_cache[addr][2]
if remote_host is not None:
env['REMOTE_HOST'] = remote_host
env_has = env.has_key
for key, value in request_data.headers.items():
value = value.strip()
mykey = rename_headers.get(key, None)
if mykey is None:
mykey = 'HTTP_%s' % key
if not env_has(mykey):
env[mykey] = value
self.cgi_env = env
return env
def start(self):
now = time.time()
self.start_time = now
self.response_headers['Date'] = build_http_date (now)
def finish(self):
if not self.wrote_header:
self.write('')
hit_log = self.channel.server.hit_log
if hit_log is not None:
hit_log.log(self)
def write(self, data):
channel = self.channel
if not self.wrote_header:
rh = self.buildResponseHeader()
channel.write(rh)
self.bytes_written += len(rh)
self.wrote_header = 1
if data:
self.bytes_written += channel.write(data)
def flush(self):
self.channel.flush()
| gpl-3.0 | 1,702,733,660,290,183,000 | 31.714894 | 79 | 0.561655 | false |
bunyk/pyrrd | monitor_sources.py | 1 | 1431 | from decimal import Decimal
import requests
from tabulate import tabulate
import json
def yahoo_finance_query(**params):
''' Return the text of the request to the Yahoo finance API
    s - ids of entities we want to receive. Every stock, index or currency has its own ID.
    If you want to get values of more than one ID, separate them with ","
    f - properties we want to get. See
https://code.google.com/p/yahoo-finance-managed/wiki/enumQuoteProperty
'''
return requests.get('http://download.finance.yahoo.com/d/quotes.csv', params=params).text
def get_exchange_rate(fixed_currency, variable_currency):
''' Return tuple of last trade, ask and bid prices for given currencies '''
r = yahoo_finance_query(s=variable_currency + fixed_currency + '=X', f='l1a0b0')
return tuple(map(Decimal, r.split(',')))
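# Usage sketch (illustrative only; the download.finance.yahoo.com CSV endpoint has since
# been retired, so live calls will fail): get_exchange_rate('RUB', 'USD') would return a
# (last_trade, ask, bid) tuple of Decimals parsed from a CSV row such as "65.50,65.60,65.40"
# (values shown are placeholders).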
def get_wikipedia_edits():
j = json.loads(requests.get(
'https://uk.wikipedia.org/w/api.php',
params={
'action': 'query',
'meta': 'siteinfo',
'siprop': 'statistics',
'continue': '',
'format': 'json',
}
).text)
return int(j['query']['statistics']['edits'])
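# The siteinfo/statistics query above returns JSON shaped roughly like
# {"query": {"statistics": {"edits": 12345678, ...}}}; only the total edit count is
# extracted (the number shown is a placeholder).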
def table():
print(tabulate(
(
('RUB/' + currency, ) + get_exchange_rate('RUB', currency)
for currency in ('EUR', 'USD', 'UAH')
),
headers = ('Pair', 'Trade', 'Ask', 'Bid')
))
| mit | 8,548,422,122,788,347,000 | 32.27907 | 93 | 0.606569 | false |
HansGR/ggPlayer | ggplayer/ggPlayer.py | 1 | 12818 | ## Copyright 2013 Hans Rinderknecht
## This file is part of ggPlayer.
## ggPlayer is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## ggPlayer is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with ggPlayer. If not, see <http://www.gnu.org/licenses/>.
import pyjd # this is dummy in pyjs.
from pyjamas import DOM
from pyjamas.ui.RootPanel import RootPanel, RootPanelCls, manageRootPanel
from pyjamas.ui.Button import Button
from pyjamas.ui.TextBox import TextBox
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.FocusPanel import FocusPanel
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.Canvas.GWTCanvas import GWTCanvas
from pyjamas.Canvas.ImageLoader import loadImages
from pyjamas.Canvas import Color
from pyjamas.ui.Image import Image
#from pyjamas.Canvas2D import Canvas, CanvasImage, ImageLoadListener
from pyjamas.Timer import Timer
from pyjamas.ui.Widget import Widget
from pyjamas import Window
from pyjamas.ui import Event
from pyjamas.ui import KeyboardListener
from pyjamas.ui.KeyboardListener import KeyboardHandler
from pyjamas.ui.ClickListener import ClickHandler
from pyjamas.dnd import makeDraggable
from pyjamas.ui.DragWidget import DragWidget, DragContainer
from pyjamas.ui.DropWidget import DropWidget
from pyjamas.dnd import getTypes
import math
import pygwt
import random
# add the games to the path
from chess import Chess
# Define constants in the program
BOARDWIDTH = 600
BOARDHEIGHT = 600
GAMENAME = 'Chess'
LIGHT = Color.Color('#FDE6BE')
DARK = Color.Color('#695532')
COLORS = [LIGHT,DARK]
SELECT = Color.Color('#FF0000')
class GameCanvas(GWTCanvas):
"""Canvas containing the gameboard
Attributes:
width (int): Width of the board in pixels
height (int): Height of the board in pixels
gametype (str): Type of game to play
images (list): Location of gamepiece images (relative URL)
img_dict (list): Dictionary of image handles for each player [player] = {piece.name: handle}
game (Boardgame object): The active game in this canvas
Note:
It's worth thinking about making this an AbsolutePanel that incorporates
two or three independent Canvases. This will enable easy, independent
modification of the board, pieces, and selection (or animation). For
example, moving a piece currently requires redrawing the origin square to
erase the old piece image. Having pieces that are larger than the squares
they are on would require redrawing the entire board every turn
(of course, for general forward-compatibility, that's not a terrible idea,
        and it probably doesn't take noticeably longer to execute.)
"""
def __init__(self, w, h, game):
""" Initialize and resize the canvas; load the game.
:param w: The width of the canvas in pixels
:type w: int
:param h: The height of the canvas in pixels
:type h: int
"""
GWTCanvas.__init__(self, w, h)
self.setSize(w,h)
self.setStyleName('drophere')
self.setStyleAttribute('position', 'relative')
self.width = w
self.height = h
self.images = []
self.img_dict = []
for x in range(game.num_players):
self.img_dict.append({})
self.run = False
self.resize(self.width, self.height)
self.run = True
def reset(self, game):
"""Redraw the board and the pieces"""
self.drawBoard(game)
self.drawPieces(game)
def drawBoard(self, game):
"""Draw all cells in the board"""
# draw the cells
for cell in game.board.values():
self.drawCell(cell,COLORS)
def drawCellPath(self, gamecell):
"""Helper function, draw the border path of a cell"""
path, pathtype = gamecell.getPath()
path = [[a*self.width,b*self.height] for a,b in path]
self.beginPath()
self.moveTo(path[0][0],path[0][1])
for j in range(len(pathtype)):
if pathtype[j]=='line':
xi = int(path[j+1][0])
yi = int(path[j+1][1])
self.lineTo(xi,yi)
elif pathtype[j]=='arc':
x1 = int(path[j+1][0])
y1 = int(path[j+1][1])
x2 = int(path[j+1][2])
y2 = int(path[j+1][3])
r = int(path[j+1][4])
self.arcTo(x1,y1,x2,y2,r)
elif pathtype[j]=='quad':
pass
elif pathtype[j]=='bezier':
pass
else:
pass
self.closePath()
def drawCell(self,gamecell,colors):
"""Draw a cell in the board
:param cell: gamecell to draw
:type cell: gamecell object
:param colors: Cell colors used in this board
:type colors: list
"""
self.drawCellPath(gamecell)
self.setFillStyle(colors[gamecell.color])
self.fill()
def drawSelection(self, gamecell):
"""Draw a selection around the stated cell"""
self.drawCellPath(gamecell)
#self.setStrokeStyle(SELECT)
#self.setLineWidth(1)
#self.stroke()
self.setFillStyle(SELECT)
self.fill()
def drawPieces(self, game):
"""Draw all pieces on their position in state"""
#Window.alert("Drawing Pieces")
for i in game.board.keys():
for j in game.state[i]:
self.drawPiece(game.pieces[j],game.board[i])
def drawPiece(self, gamepiece, cell):
"""Draw a piece in a cell
:param gamepiece: Piece to be drawn
:type gamepiece: gamepiece object
:param cell: Cell in which to draw the piece
:type cell: gamecell object
"""
img = self.img_dict[gamepiece.player][gamepiece.name]
#Window.alert(cell.name)
xi,yi = cell.getPos()
x = int(xi*self.width)
y = int(yi*self.height)
#Window.alert(str(x)+" "+str(y))
wi,hi = cell.getSize()
w = int(wi*self.width)
        h = int(hi*self.height)
self.drawImage(img, 0, 0, 45, 45, x, y, w, h)
class GamePlayer(DockPanel):
""" The GamePlayer widget, containing game canvas and controls
Attributes:
GC (GameCanvas): The GameCanvas object, containing game canvas and active game
b (Button): The button for submitting moves
cell1 (TextBox): Origin cell for a piece move
cell2 (TextBox): Destination cell for a piece move
cellPanel (HorizontalPanel): Panel containing cell1 and cell2
mover (VerticalPanel): Panel containing cellPanel and b
selectedCell (list): list of cell IDs that are currently selected
Note:
it might be a good idea to move the game proper out of the GameCanvas object
- references and game-functions are kind of long
"""
selectedCell = []
def __init__(self, width, height, gametype):
"""Initialize the GameCanvas and construct the layout
:param width: width of the game canvas in pixels
:type width: int
:param height: height of the game canvas in pixels
:type height: int
:param gametype: type of game to be loaded
:type gametype: str
"""
DockPanel.__init__(self,HorizontalAlignment=HasAlignment.ALIGN_CENTER,Spacing=10)
if gametype == 'Chess':
self.game = Chess()
self.boardtype = self.game.boardtype
self.images = []
for i in self.game.pieces:
self.images.append('./images/Chess/'+str(i.player)+str(i.name)+'.svg')
self.images = list(set(self.images)) #eliminate duplicates
self.GC = GameCanvas(width, height, self.game)
loadImages(self.images, self)
self.GC.addMouseListener(self)
self.b = Button("Make Move", self, StyleName='teststyle')
self.cell1 = TextBox(StyleName='boxStyle')
self.cell2 = TextBox(StyleName='boxStyle')
self.cellPanel = HorizontalPanel(VerticalAlignment=HasAlignment.ALIGN_MIDDLE)
self.cellPanel.add(self.cell1)
self.cellPanel.add(self.cell2)
self.mover = VerticalPanel(HorizontalAlignment=HasAlignment.ALIGN_CENTER)
self.mover.add(self.cellPanel)
self.mover.add(self.b)
self.add(self.GC, DockPanel.CENTER)
self.add(self.mover, DockPanel.EAST)
def GUImove(self, piece, cell):
"""Execute a move in the game; redraw the board"""
didMove = self.game.make_move(piece, cell)
self.GC.reset(self.game)
def onMouseUp(self, sender, x, y):
mousex = float(x)/BOARDWIDTH
mousey = float(y)/BOARDHEIGHT
clickcell = self.game.whichCell(mousex,mousey)
clickpieceID = self.game.state[clickcell]
#If no cell is selected, make this cell selected.
if len(self.selectedCell)==0:
#If piece on this cell is not active or no piece on this cell, don't select
if len(clickpieceID)==0:
pass
elif self.game.pieces[clickpieceID[len(clickpieceID)-1]].player!=self.game.state['player']:
pass
else:
self.selectedCell.append(clickcell)
#If this cell is selected, unselect this cell
elif self.selectedCell[0]==clickcell:
self.selectedCell.remove(clickcell)
#If another cell is selected, query piece on that cell, call GUImove, clear selected
else:
piecelist = self.game.state[self.selectedCell.pop()]
piece = self.game.pieces[piecelist[len(piecelist)-1]]
cell = self.game.board[clickcell]
self.GUImove(piece, cell)
self.GC.reset(self.game)
for i in self.selectedCell:
self.GC.drawSelection(self.game.board[i])
for j in self.game.state[i]:
self.GC.drawPiece(self.game.pieces[j], self.game.board[i])
def onClick(self,sender):
"""Call function for the text/button-based move controller"""
if sender == self.b:
cell1_txt = self.cell1.getText()
cell2_txt = self.cell2.getText()
#Window.alert(str(cell1_txt))
            if cell1_txt in self.game.board and cell2_txt in self.game.board:
piece = self.game.pieces[self.game.state[cell1_txt][len(self.game.state[cell1_txt])-1]]
cell = self.game.board[cell2_txt]
self.GUImove(piece, cell)
else:
Window.alert("cell names not recognized!")
self.cell1.setText("")
self.cell2.setText("")
def onImagesLoaded(self, imagesHandles):
"""Associate the correct image handle with each piece type
:param imageHandles: handles for the images in self.images
:type imageHandles: list
"""
#Window.alert("loading images")
for i in self.images:
substr = i.split('/')
img = substr.pop()
p = int(img[0])
name = img[1:img.find('.')]
self.GC.img_dict[p][name] = imagesHandles[self.images.index(i)]
self.GC.reset(self.game)
if __name__ == '__main__':
pyjd.setup("public/ggPlayer.html")
#h = HTML("<b>Welcome to gg Player!</b> (html)", StyleName='teststyle')
#Window.alert(str(game2.getCoordWidth()))
#Player = GameCanvas(BOARDWIDTH,BOARDHEIGHT,GAMENAME)
Player = GamePlayer(BOARDWIDTH,BOARDHEIGHT,GAMENAME)
#panel = FocusPanel(Widget=game)
#dock = DockPanel(HorizontalAlignment=HasAlignment.ALIGN_CENTER,Spacing=10)
#dock.add(game, DockPanel.CENTER)
#dock.add(b, DockPanel.EAST)
#l = Label("Hello World (label)", StyleName='teststyle')
#base = HTML("Hello from %s" % pygwt.getModuleBaseURL(),StyleName='teststyle')
#RootPanel().add(b)
#RootPanel().add(h)
RootPanel().add(Player)
#RootPanel().add(base)
pyjd.run()
| gpl-3.0 | 9,143,978,955,762,802,000 | 35.414773 | 103 | 0.62342 | false |
veprbl/root | tutorials/pyroot/hsimple.py | 1 | 2699 | ## \file
## \ingroup tutorial_pyroot
## \notebook -js
## This program creates :
## - a one dimensional histogram
## - a two dimensional histogram
## - a profile histogram
## - a memory-resident ntuple
##
## These objects are filled with some random numbers and saved on a file.
##
## \macro_image
## \macro_code
##
## \author Wim Lavrijsen
from ROOT import TCanvas, TFile, TProfile, TNtuple, TH1F, TH2F
from ROOT import gROOT, gBenchmark, gRandom, gSystem, Double
# Create a new canvas, and customize it.
c1 = TCanvas( 'c1', 'Dynamic Filling Example', 200, 10, 700, 500 )
c1.SetFillColor( 42 )
c1.GetFrame().SetFillColor( 21 )
c1.GetFrame().SetBorderSize( 6 )
c1.GetFrame().SetBorderMode( -1 )
# Create a new ROOT binary machine independent file.
# Note that this file may contain any kind of ROOT objects, histograms,
# pictures, graphics objects, detector geometries, tracks, events, etc..
# This file is now becoming the current directory.
hfile = gROOT.FindObject( 'py-hsimple.root' )
if hfile:
hfile.Close()
hfile = TFile( 'py-hsimple.root', 'RECREATE', 'Demo ROOT file with histograms' )
# Create some histograms, a profile histogram and an ntuple
hpx = TH1F( 'hpx', 'This is the px distribution', 100, -4, 4 )
hpxpy = TH2F( 'hpxpy', 'py vs px', 40, -4, 4, 40, -4, 4 )
hprof = TProfile( 'hprof', 'Profile of pz versus px', 100, -4, 4, 0, 20 )
ntuple = TNtuple( 'ntuple', 'Demo ntuple', 'px:py:pz:random:i' )
# Set canvas/frame attributes.
hpx.SetFillColor( 48 )
gBenchmark.Start( 'hsimple' )
# Initialize random number generator.
gRandom.SetSeed()
rannor, rndm = gRandom.Rannor, gRandom.Rndm
# For speed, bind and cache the Fill member functions,
histos = [ 'hpx', 'hpxpy', 'hprof', 'ntuple' ]
for name in histos:
exec '%sFill = %s.Fill' % (name,name)
# Fill histograms randomly.
px, py = Double(), Double()
kUPDATE = 1000
for i in xrange( 25000 ):
# Generate random values.
rannor( px, py )
pz = px*px + py*py
random = rndm(1)
# Fill histograms.
hpx.Fill( px )
hpxpy.Fill( px, py )
hprof.Fill( px, pz )
ntuple.Fill( px, py, pz, random, i )
# Update display every kUPDATE events.
if i and i%kUPDATE == 0:
if i == kUPDATE:
hpx.Draw()
c1.Modified()
c1.Update()
if gSystem.ProcessEvents(): # allow user interrupt
break
# Destroy member functions cache.
for name in histos:
exec 'del %sFill' % name
del histos
gBenchmark.Show( 'hsimple' )
# Save all objects in this file.
hpx.SetFillColor( 0 )
hfile.Write()
hpx.SetFillColor( 48 )
c1.Modified()
c1.Update()
# Note that the file is automatically closed when application terminates
# or when the file destructor is called.
| lgpl-2.1 | -3,326,936,367,549,896,000 | 26.540816 | 80 | 0.674324 | false |
941design/ptTools | ptTools/misc/attributes.py | 1 | 2701 | #!/usr/bin/env python3
"""Module providing the AttributesMixIn class."""
class AttributesMixIn(object):
"""Mix-in, providing an attributes collection of variable type.
Collection type may be list, set or dict. Getters and setters are
polymorphic.
To return a node's inherited attributes, access
self.all_attributes. The return value depends on the attribute
type self was initialized with (defaults to dict).
    1. dict - Returns a dict with self's attributes taking precedence over
its ancestors.
2. list - Returns a list containing all attributes of all
ancestors, starting with root.
"""
def __init__(self, attr_type, **kwargs):
"""Initialized with one of dict, list, or set."""
super().__init__(**kwargs)
if attr_type is None:
self.attributes = None
else:
## The ._collector method will be used to recursively
## compose the attributes for a node. The ._selector
## tests if an item exists as attribute and returns a bool
## or, if attr_type is dict the attribute value.
self.attributes = attr_type()
if attr_type is list:
self._collector = list.extend
self._selector = list.__contains__
elif attr_type is dict:
self._collector = dict.update
self._selector = dict.get
elif attr_type is set:
self._collector = set.union
self._selector = set.__contains__
else:
raise AttributeError('Invalid attribute type.')
def get_attribute(self, key, default=None):
"""Returns value if attributes collection is dict, otherwise
True, if key is present else False."""
if self.attributes is None:
return None
else:
val = self._selector(self.attributes, key)
return val if not val is None else default
def has_attribute(self, key):
"""True if self.attributes includes key."""
return self.get_attribute(key) is not None
def get_attribute_else(self, key, fn):
"""Returns attribute for key, if present, else calls fn(self)."""
if self.attributes is None:
return None
else:
val = self._selector(self.attributes, key)
return val if not val is None else fn(self)
def add_attributes(self, attrs):
"""Adds attrs to self.attributes."""
if self.attributes is None:
raise AttributeError('self has no attributes collection.')
else:
self._collector(self.attributes, attrs)
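# A minimal usage sketch (illustrative only, not part of the original module):
# a hypothetical Node class mixes in AttributesMixIn with a dict-backed store.
#
#   class Node(AttributesMixIn):
#       def __init__(self):
#           super().__init__(dict)
#
#   node = Node()
#   node.add_attributes({'color': 'red'})
#   node.get_attribute('color')    # -> 'red'
#   node.has_attribute('weight')   # -> False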
| gpl-3.0 | -5,291,276,212,073,134,000 | 34.539474 | 73 | 0.593854 | false |
Eric89GXL/scikit-learn | doc/sphinxext/gen_rst.py | 1 | 39133 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import re
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import urllib2
import gzip
import posixpath
import warnings
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
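# Minimal usage sketch (the URL is an assumption, shown only for illustration):
#
#   searchindex = get_data('http://scikit-learn.org/stable/searchindex.js')
#   filenames, objects = parse_sphinx_searchindex(searchindex)
#   # 'objects' maps dotted names (e.g. 'sklearn.svm.SVC') to entries
#   # (possibly nested one level) whose first element indexes into
#   # 'filenames'; SphinxDocLinkResolver below relies on this structure.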
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
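# Sketch of how the resolver is typically driven (values are hypothetical;
# embed_code_links() further down does the real work):
#
#   resolver = SphinxDocLinkResolver('http://docs.scipy.org/doc/numpy-1.6.0')
#   cobj = {'name': 'array', 'module': 'numpy', 'module_short': 'numpy'}
#   link = resolver.resolve(cobj, this_url='auto_examples/plot_foo.html')
#   # 'link' is a URL such as '.../numpy.array.html#numpy.array', or None.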
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_1.png': (1, 600),
'plot_outlier_detection_1.png': (3, 372),
'plot_gp_regression_1.png': (2, 250),
'plot_adaboost_twoclass_1.png': (1, 372),
'plot_compare_methods_1.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
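# Illustrative sketch (the file name is an assumption): the returned 'end_row'
# is what the generated rst passes to the ':lines:' directive, so that the
# literalinclude starts right after the docstring.
#
#   docstring, short_desc, end_row = extract_docstring('plot_iris.py')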
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
background-color: white;
/* --> Thumbnail image size */
width: 150px;
height: 100px;
-webkit-background-size: 150px 100px; /* Saf3-4 */
-moz-background-size: 150px 100px; /* FF3.6 */
}
.figure img {
display: inline;
}
div.docstringWrapper p.caption {
display: block;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, .0); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 0px;
border: white;
}
div.docstringWrapper p {
display: none;
background-color: white;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 13px;
margin-top: 0px;
border-style: solid;
border-width: 1px;
}
</style>
.. raw:: html
<script type="text/javascript">
function animateClone(e){
var position;
position = $(this).position();
var clone = $(this).closest('.thumbnailContainer').find('.clonedItem');
var clone_fig = clone.find('.figure');
clone.css("left", position.left - 70).css("top", position.top - 70).css("position", "absolute").css("z-index", 1000).css("background-color", "white");
var cloneImg = clone_fig.find('img');
clone.show();
clone.animate({
height: "270px",
width: "320px"
}, 0
);
cloneImg.css({
'max-height': "200px",
'max-width': "280px"
});
cloneImg.animate({
height: "200px",
width: "280px"
}, 0
);
clone_fig.css({
'margin-top': '20px',
});
clone_fig.show();
clone.find('p').css("display", "block");
clone_fig.css({
height: "240",
width: "305px"
});
cloneP_height = clone.find('p.caption').height();
clone_fig.animate({
height: (200 + cloneP_height)
}, 0
);
clone.bind("mouseleave", function(e){
clone.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
clone_fig.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
});
} //end animateClone()
$(window).load(function () {
$(".figure").css("z-index", 1);
$(".docstringWrapper").each(function(i, obj){
var clone;
var $obj = $(obj);
clone = $obj.clone();
clone.addClass("clonedItem");
clone.appendTo($obj.closest(".thumbnailContainer"));
clone.hide();
$obj.bind("mouseenter", animateClone);
}); // end each
}); // end
</script>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file' %
src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, fdocstring, _ = extract_docstring(new_fname, True)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, fdocstring, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
os.system("optipng -quiet -o 9 '{0}'".format(out_fname))
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
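# Usage sketch (paths are assumptions): shrink a generated figure to the
# gallery thumbnail size, centred on a white background.
#
#   make_thumbnail('images/plot_foo_1.png', 'images/thumb/plot_foo.png',
#                  400, 280)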
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
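# Example (the exact result depends on the installed packages, so treat it as
# an assumption): the shortest module path from which the object can still be
# imported is preferred when building documentation links.
#
#   get_short_module_name('sklearn.svm.classes', 'SVC')   # -> 'sklearn.svm'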
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception as err:
# Here, we were not able to execute the
# previous statement, either because the
# fun_name was not a function but a statement
# (print), or because the regexp didn't
# catch the whole function name :
# eg:
# X = something().blah()
# will work for something, but not blah.
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
for name, link in str_repl.iteritems():
line = line.replace(name, link)
fid.write(line.encode('utf-8'))
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| bsd-3-clause | -8,002,446,403,779,385,000 | 35.33519 | 160 | 0.523727 | false |
binji/naclports | build_tools/naclports.py | 1 | 23393 | #!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool for manipulating naclports packages in python.
This tool can be used to for working with naclports packages.
It can also be incorporated into other tools that need to
work with packages (e.g. 'update_mirror.py' uses it to iterate
through all packages and mirror them on Google Cloud Storage).
"""
import optparse
import os
import posixpath
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time
import urlparse
import sha1check
MIRROR_URL = 'http://storage.googleapis.com/naclports/mirror'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACLPORTS_ROOT = os.path.dirname(SCRIPT_DIR)
PORTS_DIR = os.path.join(NACLPORTS_ROOT, "ports")
OUT_DIR = os.path.join(NACLPORTS_ROOT, 'out')
STAMP_DIR = os.path.join(OUT_DIR, 'stamp')
BUILD_ROOT = os.path.join(OUT_DIR, 'build')
ARCHIVE_ROOT = os.path.join(OUT_DIR, 'tarballs')
PACKAGES_ROOT = os.path.join(OUT_DIR, 'packages')
PUBLISH_ROOT = os.path.join(OUT_DIR, 'publish')
PAYLOAD_DIR = 'payload/'
NACL_SDK_ROOT = os.environ.get('NACL_SDK_ROOT')
if NACL_SDK_ROOT:
sys.path.append(os.path.join(NACL_SDK_ROOT, 'tools'))
arch_to_pkgarch = {
'x86_64': 'x86-64',
'i686': 'i686',
'arm': 'arm',
'pnacl': 'pnacl',
}
# Inverse of arch_to_pkgarch
pkgarch_to_arch = {v:k for k, v in arch_to_pkgarch.items()}
# TODO(sbc): use this code to replace the bash logic in build_tools/common.sh
class Error(Exception):
pass
class DisabledError(Error):
pass
def FormatTimeDelta(delta):
rtn = ''
if delta > 60:
    mins = int(delta / 60)
rtn += '%dm' % mins
delta -= mins * 60
if delta:
rtn += '%.0fs' % delta
return rtn
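# e.g. FormatTimeDelta(75.0) -> '1m15s', FormatTimeDelta(4.2) -> '4s'.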
def WriteStamp(filename, contents=''):
"""Write a file with the give filename and contents."""
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
Trace('stamp: %s' % filename)
with open(filename, 'w') as f:
f.write(contents)
def CheckStamp(filename, contents=None, timestamp=None):
"""Check that a given stamp file is up-to-date.
  Returns False if the file does not exist, is older than the given
  comparison file, or does not contain the given contents.
  Returns True otherwise.
"""
if not os.path.exists(filename):
return False
if contents is not None:
with open(filename) as f:
if f.read() != contents:
return False
return True
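# Usage sketch (paths and contents are hypothetical): WriteStamp/CheckStamp
# pair up to record that a step completed with a particular configuration.
#
#   WriteStamp('out/stamp/zlib/build', 'VERSION=1.2.8')
#   CheckStamp('out/stamp/zlib/build', 'VERSION=1.2.8')   # -> True
#   CheckStamp('out/stamp/zlib/build', 'VERSION=1.2.9')   # -> False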
def Log(message):
sys.stdout.write(str(message) + '\n')
sys.stdout.flush()
def Trace(message):
if Trace.verbose:
Log(message)
Trace.verbose = False
def GetCurrentArch():
arch = os.environ.get('NACL_ARCH')
if not arch:
if GetCurrentToolchain() == 'pnacl':
return 'pnacl'
else:
return 'x86_64'
return arch
def GetCurrentLibc():
libc = GetCurrentToolchain()
if libc == 'pnacl':
libc = 'newlib'
return libc
def GetCurrentToolchain():
if os.environ.get('NACL_ARCH') == 'pnacl':
return 'pnacl'
return os.environ.get('TOOLCHAIN') or 'newlib'
def GetDebug():
if os.environ.get('NACL_DEBUG') == '1':
return 'debug'
else:
return 'release'
def GetToolchainRoot(toolchain=None, arch=None):
"""Returns the toolchain folder for a given NaCl toolchain."""
import getos
platform = getos.GetPlatform()
if toolchain == 'pnacl':
tc_dir = '%s_pnacl' % platform
else:
tc_arch = {
'arm': 'arm',
'i686': 'x86',
'x86_64': 'x86'
}[arch]
tc_dir = '%s_%s_%s' % (platform, tc_arch, toolchain)
tc_dir = os.path.join(tc_dir, '%s-nacl' % arch)
return os.path.join(NACL_SDK_ROOT, 'toolchain', tc_dir)
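# For example (platform-dependent; shown as an assumption for a Linux host):
#
#   GetToolchainRoot('newlib', 'x86_64')
#   # -> <NACL_SDK_ROOT>/toolchain/linux_x86_newlib/x86_64-nacl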
def GetInstallRoot(toolchain, arch):
"""Returns the installation used by naclports within a given toolchain."""
tc_root = GetToolchainRoot(toolchain, arch)
if toolchain == 'pnacl':
return os.path.join(tc_root, 'usr', 'local')
else:
return os.path.join(tc_root, 'usr')
def GetInstallStampRoot(toolchain, arch):
tc_root = GetInstallRoot(toolchain, arch)
return os.path.join(tc_root, 'var', 'lib', 'npkg')
def GetInstallStamp(package_name):
root = GetInstallStampRoot(GetCurrentToolchain(), GetCurrentArch())
return os.path.join(root, package_name)
def IsInstalled(package_name):
stamp = GetInstallStamp(package_name)
return CheckStamp(stamp)
class BinaryPackage(object):
def __init__(self, filename):
self.filename = filename
if not os.path.exists(self.filename):
raise Error('package not found: %s'% self.filename)
basename, extension = os.path.splitext(os.path.basename(filename))
basename = os.path.splitext(basename)[0]
if extension != '.bz2':
raise Error('invalid file extension: %s' % extension)
if '_' not in basename:
raise Error('package filename must contain underscores: %s' % basename)
parts = basename.split('_')
if len(parts) < 3 or len(parts) > 5:
raise Error('invalid package filename: %s' % basename)
if parts[-1] == 'debug':
parts = parts[:-1]
if parts[-1] in ('newlib', 'glibc', 'bionic'):
self.toolchain = parts[-1]
parts = parts[:-1]
self.name, self.version, arch = parts[:3]
if arch == 'pnacl':
self.toolchain = 'pnacl'
self.arch = pkgarch_to_arch[arch]
def IsInstalled(self):
    return IsInstalled(self.name)
def InstallFile(self, filename, old_root, new_root):
oldname = os.path.join(old_root, filename)
if os.path.isdir(oldname):
return
if not filename.startswith(PAYLOAD_DIR):
return
newname = filename[len(PAYLOAD_DIR):]
Trace('install: %s' % newname)
newname = os.path.join(new_root, newname)
dirname = os.path.dirname(newname)
if not os.path.isdir(dirname):
os.makedirs(dirname)
os.rename(oldname, newname)
def RelocateFile(self, filename, dest):
# Only relocate files in the payload.
if not filename.startswith(PAYLOAD_DIR):
return
# Only relocate certain file types.
filename = filename[len(PAYLOAD_DIR):]
modify = False
# boost build scripts
# TODO(sbc): move this to the boost package metadata
if filename.startswith('build-1'):
modify = True
# pkg_config (.pc) files
if filename.startswith('lib/pkgconfig'):
modify = True
# <foo>-config scripts that live in usr/bin
if filename.startswith('bin') and filename.endswith('-config'):
modify = True
# libtool's .la files which can contain absolute paths to
# dependencies.
if filename.startswith('lib/') and filename.endswith('.la'):
modify = True
# headers can sometimes also contain absolute paths.
if filename.startswith('include/') and filename.endswith('.h'):
modify = True
filename = os.path.join(dest, filename)
if os.path.isdir(filename):
return
if modify:
with open(filename) as f:
data = f.read()
      # Open with 'w' so the file is truncated; otherwise stale bytes could
      # remain when the replacement path is shorter than the placeholder.
      with open(filename, 'w') as f:
        f.write(data.replace('/naclports-dummydir', dest))
def Install(self):
"""Install binary package into toolchain directory."""
dest = GetInstallRoot(self.toolchain, self.arch)
dest_tmp = os.path.join(dest, 'install_tmp')
if os.path.exists(dest_tmp):
shutil.rmtree(dest_tmp)
Log("Installing '%s' [%s/%s]" % (self.name, self.arch, self.toolchain))
os.makedirs(dest_tmp)
try:
with tarfile.open(self.filename) as t:
names = [posixpath.normpath(name) for name in t.getnames()]
if 'pkg_info' not in names:
raise Error('package does not contain pkg_info file')
for name in names:
if name not in ('.', 'pkg_info', 'payload'):
if not name.startswith(PAYLOAD_DIR):
raise Error('invalid file in package: %s' % name)
t.extractall(dest_tmp)
for name in names:
self.InstallFile(name, dest_tmp, dest)
for name in names:
self.RelocateFile(name, dest)
with open(os.path.join(dest_tmp, 'pkg_info')) as f:
pkg_info = f.read()
WriteStamp(GetInstallStamp(self.name), pkg_info)
finally:
shutil.rmtree(dest_tmp)
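# Usage sketch (the archive name is hypothetical): installing a prebuilt
# package archive into the active toolchain.
#
#   pkg = BinaryPackage('out/packages/zlib_1.2.8_x86-64_newlib.tar.bz2')
#   pkg.Install()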
class Package(object):
"""Representation of a single naclports package.
Package objects correspond to folders on disk which
contain a 'pkg_info' file.
"""
VALID_KEYS = ('NAME', 'VERSION', 'URL', 'ARCHIVE_ROOT', 'LICENSE', 'DEPENDS',
'MIN_SDK_VERSION', 'LIBC', 'ARCH', 'DISABLED_ARCH',
'URL_FILENAME', 'BUILD_OS', 'SHA1', 'DISABLED')
VALID_SUBDIRS = ('', 'ports', 'python_modules')
def __init__(self, pkg_root):
self.root = os.path.abspath(pkg_root)
self.basename = os.path.basename(self.root)
keys = []
for key in Package.VALID_KEYS:
setattr(self, key, None)
self.DEPENDS = []
self.info = None
for subdir in Package.VALID_SUBDIRS:
info = os.path.join(PORTS_DIR, subdir, self.basename, 'pkg_info')
if os.path.exists(info):
self.info = info
break
if self.info is None:
raise Error('Invalid package folder: %s' % pkg_root)
self.root = os.path.dirname(self.info)
with open(self.info) as f:
for i, line in enumerate(f):
if line[0] == '#':
continue
key, value = self.ParsePkgInfoLine(line, i+1)
keys.append(key)
setattr(self, key, value)
for required_key in ('NAME', 'VERSION'):
if required_key not in keys:
raise Error('%s: pkg_info missing required key: %s' %
(self.info, required_key))
if '_' in self.NAME:
raise Error('%s: package NAME cannot contain underscores' % self.info)
if '_' in self.VERSION:
raise Error('%s: package VERSION cannot contain underscores' % self.info)
if self.NAME != os.path.basename(self.root):
raise Error('%s: package NAME must match directory name' % self.info)
if self.DISABLED_ARCH is not None and self.ARCH is not None:
raise Error('%s: contains both ARCH and DISABLED_ARCH' % self.info)
def __cmp__(self, other):
return cmp(self.NAME, other.NAME)
def ParsePkgInfoLine(self, line, line_no):
if '=' not in line:
raise Error('Invalid pkg_info line %d: %s' % (line_no, self.info))
key, value = line.split('=', 1)
key = key.strip()
if key not in Package.VALID_KEYS:
raise Error("Invalid key '%s' in pkg_info: %s" % (key, self.info))
value = value.strip()
if value[0] == '(':
array_value = []
if value[-1] != ')':
raise Error('Error parsing %s: %s (%s)' % (self.info, key, value))
value = value[1:-1]
for single_value in value.split():
array_value.append(single_value)
value = array_value
else:
value = shlex.split(value)[0]
return (key, value)
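  # Illustrative examples of the pkg_info syntax accepted above (values are
  # hypothetical): plain values are shlex-split and the first token kept,
  # while parenthesised values become lists.
  #
  #   NAME=zlib               -> ('NAME', 'zlib')
  #   DEPENDS=(zlib libpng)   -> ('DEPENDS', ['zlib', 'libpng'])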
def CheckDeps(self, valid_dependencies):
for dep in self.DEPENDS:
if dep not in valid_dependencies:
Log('%s: Invalid dependency: %s' % (self.info, dep))
return False
return True
def GetBasename(self):
basename = os.path.splitext(self.GetArchiveFilename())[0]
if basename.endswith('.tar'):
basename = os.path.splitext(basename)[0]
return basename
def GetBuildLocation(self):
package_dir = self.ARCHIVE_ROOT or '%s-%s' % (self.NAME, self.VERSION)
return os.path.join(BUILD_ROOT, self.NAME, package_dir)
def GetArchiveFilename(self):
if self.URL_FILENAME:
return self.URL_FILENAME
if self.URL and '.git' not in self.URL:
return os.path.basename(urlparse.urlparse(self.URL)[2])
return None
def DownloadLocation(self):
archive = self.GetArchiveFilename()
if not archive:
return
return os.path.join(ARCHIVE_ROOT, archive)
def InstallDeps(self, verbose, force):
for dep in self.DEPENDS:
if not IsInstalled(dep):
dep_dir = os.path.join(os.path.dirname(self.root), dep)
dep = Package(dep_dir)
try:
dep.Install(verbose, True, force)
except DisabledError as e:
Log(str(e))
def PackageFile(self):
toolchain = GetCurrentToolchain()
arch = GetCurrentArch()
fullname = [os.path.join(PACKAGES_ROOT, self.NAME)]
fullname.append(self.VERSION)
fullname.append(arch_to_pkgarch[arch])
if toolchain != arch: # for pnacl toolchain and arch are the same
fullname.append(toolchain)
if os.environ.get('NACL_DEBUG') == '1':
fullname.append('debug')
return '_'.join(fullname) + '.tar.bz2'
def IsInstalled(self):
return IsInstalled(self.NAME)
def IsBuilt(self):
return os.path.exists(self.PackageFile())
def Install(self, verbose, build_deps, force=None):
force_install = force in ('build', 'install', 'all')
if not force_install and self.IsInstalled():
Log("Already installed '%s' [%s/%s]" % (self.NAME, GetCurrentArch(),
GetCurrentLibc()))
return
if not self.IsBuilt() or force:
self.Build(verbose, build_deps, force)
BinaryPackage(self.PackageFile()).Install()
def Build(self, verbose, build_deps, force=None):
self.CheckEnabled()
if build_deps or force == 'all':
self.InstallDeps(verbose, force)
annotate = os.environ.get('NACLPORTS_ANNOTATE') == '1'
arch = GetCurrentArch()
libc = GetCurrentLibc()
force_build = force in ('build', 'all')
if not force_build and self.IsBuilt():
Log("Already built '%s' [%s/%s]" % (self.NAME, arch, libc))
return
log_root = os.path.join(OUT_DIR, 'logs')
if not os.path.isdir(log_root):
os.makedirs(log_root)
stdout = os.path.join(log_root, '%s.log' % self.NAME)
if os.path.exists(stdout):
os.remove(stdout)
if verbose:
prefix = '*** '
else:
prefix = ''
Log("%sBuilding '%s' [%s/%s]" % (prefix, self.NAME, arch, libc))
start = time.time()
self.RunBuildSh(verbose, stdout)
duration = FormatTimeDelta(time.time() - start)
Log("Build complete '%s' [%s/%s] [took %s]"
% (self.NAME, arch, libc, duration))
def RunBuildSh(self, verbose, stdout, args=None):
build_port = os.path.join(SCRIPT_DIR, 'build_port.sh')
cmd = [build_port]
if args is not None:
cmd += args
if verbose:
rtn = subprocess.call(cmd, cwd=self.root)
if rtn != 0:
raise Error("Building %s: failed." % (self.NAME))
else:
with open(stdout, 'a+') as log_file:
rtn = subprocess.call(cmd,
cwd=self.root,
stdout=log_file,
stderr=subprocess.STDOUT)
if rtn != 0:
with open(stdout) as log_file:
sys.stdout.write(log_file.read())
raise Error("Building '%s' failed." % (self.NAME))
def Verify(self, verbose=False):
"""Download upstream source and verify hash."""
archive = self.DownloadLocation()
if not archive:
Log("no archive: %s" % self.NAME)
return True
if self.SHA1 is None:
Log("missing SHA1 attribute: %s" % self.info)
return False
self.Download()
try:
sha1check.VerifyHash(archive, self.SHA1)
Log("verified: %s" % archive)
except sha1check.Error as e:
Log("verification failed: %s: %s" % (archive, str(e)))
return False
return True
def Clean(self):
pkg = self.PackageFile()
Log('removing %s' % pkg)
if os.path.exists(pkg):
os.remove(pkg)
stamp_dir = os.path.join(STAMP_DIR, self.NAME)
Log('removing %s' % stamp_dir)
if os.path.exists(stamp_dir):
shutil.rmtree(stamp_dir)
def Extract(self):
self.ExtractInto(os.path.join(BUILD_ROOT, self.NAME))
def ExtractInto(self, output_path):
"""Extract the package archive into the given location.
This method assumes the package has already been downloaded.
"""
archive = self.DownloadLocation()
if not archive:
return
if not os.path.exists(output_path):
os.makedirs(output_path)
new_foldername = os.path.basename(self.GetBuildLocation())
dest = os.path.join(output_path, new_foldername)
if os.path.exists(dest):
Trace('Already exists: %s' % dest)
return
tmp_output_path = tempfile.mkdtemp(dir=OUT_DIR)
try:
ext = os.path.splitext(archive)[1]
if ext in ('.gz', '.tgz', '.bz2'):
cmd = ['tar', 'xf', archive, '-C', tmp_output_path]
elif ext in ('.zip',):
cmd = ['unzip', '-q', '-d', tmp_output_path, archive]
else:
raise Error('unhandled extension: %s' % ext)
Log("Extracting '%s'" % self.NAME)
Trace(cmd)
subprocess.check_call(cmd)
src = os.path.join(tmp_output_path, new_foldername)
os.rename(src, dest)
finally:
shutil.rmtree(tmp_output_path)
def GetMirrorURL(self):
return MIRROR_URL + '/' + self.GetArchiveFilename()
def CheckEnabled(self):
if self.LIBC is not None and self.LIBC != GetCurrentLibc():
raise DisabledError('%s: cannot be built with %s.'
% (self.NAME, GetCurrentLibc()))
if self.ARCH is not None:
arch = GetCurrentArch()
if arch not in self.ARCH:
raise DisabledError('%s: disabled for current arch: %s.'
% (self.NAME, arch))
if self.DISABLED_ARCH is not None:
arch = GetCurrentArch()
if arch in self.DISABLED_ARCH:
raise DisabledError('%s: disabled for current arch: %s.'
% (self.NAME, arch))
if self.BUILD_OS is not None:
import getos
if getos.GetPlatform() != self.BUILD_OS:
raise DisabledError('%s: can only be built on %s.'
% (self.NAME, self.BUILD_OS))
def Download(self, mirror=True):
filename = self.DownloadLocation()
if not filename or os.path.exists(filename):
return
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
temp_filename = filename + '.partial'
mirror_download_successfull = False
curl_cmd = ['curl', '--fail', '--location', '--stderr', '-',
'-o', temp_filename]
if os.isatty(sys.stdout.fileno()):
# Add --progress-bar but only if stdout is a TTY device.
curl_cmd.append('--progress-bar')
else:
# otherwise suppress all status output, since curl always
# assumes a TTY and writes \r and \b characters.
curl_cmd.append('--silent')
if mirror:
try:
mirror_url = self.GetMirrorURL()
Log('Downloading: %s [%s]' % (mirror_url, temp_filename))
subprocess.check_call(curl_cmd + [mirror_url])
        mirror_download_successful = True
except subprocess.CalledProcessError:
pass
    if not mirror_download_successful:
Log('Downloading: %s [%s]' % (self.URL, temp_filename))
subprocess.check_call(curl_cmd + [self.URL])
os.rename(temp_filename, filename)
def PackageIterator(folders=None):
"""Iterator which yield a Package object for each
naclport package."""
if not folders:
folders = [os.path.join(NACLPORTS_ROOT, 'ports')]
for folder in folders:
for root, dirs, files in os.walk(folder):
if 'pkg_info' in files:
yield Package(root)
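# Illustrative usage sketch (not part of the original script): walk every
# detected port definition and print its name.
#
#   for pkg in PackageIterator():
#     print(pkg.NAME)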
def run_main(args):
usage = "Usage: %prog [options] <command> [<package_dir>]"
parser = optparse.OptionParser(description=__doc__, usage=usage)
parser.add_option('-v', '--verbose', action='store_true',
help='Output extra information.')
parser.add_option('-V', '--verbose-build', action='store_true',
                    help='Make the build itself verbose (e.g. pass V=1 to make)')
parser.add_option('--all', action='store_true',
help='Perform action on all known ports.')
parser.add_option('-f', '--force', action='store_const', const='build',
dest='force', help='Force building specified targets, '
'even if timestamps would otherwise skip it.')
parser.add_option('-F', '--force-all', action='store_const', const='all',
dest='force', help='Force building target and all '
'dependencies, even if timestamps would otherwise skip '
'them.')
parser.add_option('--force-install', action='store_const', const='install',
dest='force', help='Force installing of ports')
parser.add_option('--no-deps', dest='build_deps', action='store_false',
default=True,
help='Disable automatic building of dependencies.')
parser.add_option('--ignore-disabled', action='store_true',
help='Ignore attempts to build disabled packages.\n'
'Normally attempts to build such packages will result\n'
'in an error being returned.')
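  # Illustrative invocations (script name and package paths are hypothetical):
  #   naclports.py download ports/zlib
  #   naclports.py --all build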
options, args = parser.parse_args(args)
if not args:
parser.error('You must specify a sub-command. See --help.')
command = args[0]
package_dirs = ['.']
if len(args) > 1:
if options.all:
parser.error('Package name and --all cannot be specified together')
package_dirs = args[1:]
if not NACL_SDK_ROOT:
raise Error('$NACL_SDK_ROOT not set')
if not os.path.isdir(NACL_SDK_ROOT):
raise Error('$NACL_SDK_ROOT does not exist: %s' % NACL_SDK_ROOT)
verbose = options.verbose or os.environ.get('VERBOSE') == '1'
Trace.verbose = verbose
if options.verbose_build:
os.environ['VERBOSE'] = '1'
else:
os.environ['VERBOSE'] = '0'
os.environ['V'] = '0'
def DoCmd(package):
try:
if command == 'download':
package.Download()
elif command == 'check':
# Fact that we've got this far means the pkg_info
# is basically valid. This final check verifies the
# dependencies are valid.
package_names = [os.path.basename(p.root) for p in PackageIterator()]
package.CheckDeps(package_names)
elif command == 'enabled':
package.CheckEnabled()
elif command == 'verify':
package.Verify()
elif command == 'clean':
package.Clean()
elif command == 'build':
package.Build(verbose, options.build_deps, options.force)
elif command == 'install':
package.Install(verbose, options.build_deps, options.force)
else:
parser.error("Unknown subcommand: '%s'\n"
"See --help for available commands." % command)
except DisabledError as e:
if options.ignore_disabled:
Log('naclports: %s' % e)
else:
raise e
def rmtree(path):
Log('removing %s' % path)
if os.path.exists(path):
shutil.rmtree(path)
if options.all:
options.ignore_disabled = True
if command == 'clean':
rmtree(STAMP_DIR)
rmtree(BUILD_ROOT)
rmtree(PACKAGES_ROOT)
rmtree(PUBLISH_ROOT)
rmtree(GetInstallStampRoot(GetCurrentToolchain(), GetCurrentArch()))
rmtree(GetInstallRoot(GetCurrentToolchain(), GetCurrentArch()))
else:
for p in PackageIterator():
if not p.DISABLED:
DoCmd(p)
else:
for package_dir in package_dirs:
p = Package(package_dir)
DoCmd(p)
def main(args):
try:
run_main(args)
except Error as e:
sys.stderr.write('naclports: %s\n' % e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -9,160,051,275,839,171,000 | 29.499348 | 80 | 0.624418 | false |
BillGuard/cabot | cabot/rest_urls.py | 1 | 3991 | from django.db import models as model_fields
from django.conf.urls import url, include
from django.contrib.auth import models as django_models
from polymorphic import PolymorphicModel
from cabot.cabotapp import models
from rest_framework import routers, serializers, viewsets, mixins
import logging
logger = logging.getLogger(__name__)
router = routers.DefaultRouter()
def create_viewset(arg_model, arg_fields, arg_read_only_fields=(), no_create=False):
arg_read_only_fields = ('id',) + arg_read_only_fields
for field in arg_read_only_fields:
if field not in arg_fields:
arg_fields = arg_fields + (field,)
class Serializer(serializers.ModelSerializer):
class Meta:
model = arg_model
fields = arg_fields
read_only_fields = arg_read_only_fields
viewset_class = None
if no_create:
class NoCreateViewSet(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
pass
viewset_class = NoCreateViewSet
else:
viewset_class = viewsets.ModelViewSet
arg_queryset = None
if issubclass(arg_model, PolymorphicModel):
arg_queryset = arg_model.objects.instance_of(arg_model)
else:
arg_queryset = arg_model.objects.all()
class ViewSet(viewset_class):
queryset = arg_queryset
serializer_class = Serializer
ordering = ['id']
filter_fields = arg_fields
return ViewSet
check_group_mixin_fields = (
'name',
'users_to_notify',
'alerts_enabled',
'status_checks',
'email_alert',
'hipchat_alert',
'sms_alert',
'telephone_alert',
'hackpad_id',
)
router.register(r'services', create_viewset(
arg_model=models.Service,
arg_fields=check_group_mixin_fields + (
'url',
),
))
router.register(r'instances', create_viewset(
arg_model=models.Instance,
arg_fields=check_group_mixin_fields + (
'address',
),
))
status_check_fields = (
'name',
'active',
'importance',
'frequency',
'debounce',
)
router.register(r'status_checks', create_viewset(
arg_model=models.StatusCheck,
arg_fields=status_check_fields,
no_create=True,
))
router.register(r'icmp_checks', create_viewset(
arg_model=models.ICMPStatusCheck,
arg_fields=status_check_fields,
))
router.register(r'graphite_checks', create_viewset(
arg_model=models.GraphiteStatusCheck,
arg_fields=status_check_fields + (
'metric',
'check_type',
'value',
'expected_num_hosts',
),
))
router.register(r'http_checks', create_viewset(
arg_model=models.HttpStatusCheck,
arg_fields=status_check_fields + (
'endpoint',
'username',
'password',
'text_match',
'status_code',
'timeout',
'verify_ssl_certificate',
),
))
router.register(r'jenkins_checks', create_viewset(
arg_model=models.JenkinsStatusCheck,
arg_fields=status_check_fields + (
'max_queued_build_time',
),
))
'''
Omitting the user API, since it could expose/allow modifying dangerous fields.
router.register(r'users', create_viewset(
arg_model=django_models.User,
arg_fields=(
'password',
'is_active',
'groups',
#'user_permissions', # Doesn't work, removing for now
'username',
'first_name',
'last_name',
'email',
),
))
router.register(r'user_profiles', create_viewset(
arg_model=models.UserProfile,
arg_fields=(
'user',
'mobile_number',
'hipchat_alias',
'fallback_alert_user',
),
))
'''
router.register(r'shifts', create_viewset(
arg_model=models.Shift,
arg_fields=(
'start',
'end',
'user',
'uid',
'deleted',
)
))
| mit | 5,411,351,808,016,618,000 | 23.635802 | 84 | 0.609121 | false |
CymaticLabs/Unity3D.Amqp | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/sublanguages/include.py | 1 | 1284 | import Core
import re
info = {
"friendly_name": "Include Page",
"example_spacing": " PageTitle",
"example_template": "",
"summary": "Transcludes the contents of another page into the current page.",
"details": """
<p>If the included page has section (sub-)headings, they will be
included as (sub-)subheadings of the section in which the @include
block appears, unless the word 'toplevel' is placed after the page
title, like this:</p>
<pre>
@include PageTitle toplevel
</pre>
"""
}
def SublanguageHandler(args, doc, renderer):
if doc.paragraph.indent > 0:
raise Core.BlockSyntaxError("'@include' must not be indented at all")
args = [x.strip() for x in re.split(' *', args)]
if not args or not args[0]:
raise Core.BlockSyntaxError("'@include' needs a PageTitle")
pagename = args[0]
toplevel = (len(args) > 1) and args[1] == 'toplevel'
page = Core.Page(pagename)
if toplevel:
page.render_on(renderer)
else:
old_rank_offset = renderer.save_rank_offset()
try:
page.render_on(renderer)
finally:
renderer.restore_rank_offset(old_rank_offset)
if page.exists():
renderer.page.mark_dependency_on(pagename)
| mit | -4,046,064,245,910,437,400 | 26.913043 | 81 | 0.625389 | false |
headstrongsolutions/Jarvis_Screen | gettime.py | 1 | 1266 | from datetime import datetime
def convert_daynum_to_friendly(day_num):
    # Append the English ordinal suffix; 11-13 always take "th".
    day_int = int(day_num)
    lastdigit = day_int % 10
    if lastdigit == 1 and day_int != 11:
        friendly_string = "st"
    elif lastdigit == 2 and day_int != 12:
        friendly_string = "nd"
    elif lastdigit == 3 and day_int != 13:
        friendly_string = "rd"
    else:
        friendly_string = "th"
    return "%s%s" % (day_num, friendly_string)
def get_time_now():
time_now = datetime.now().time()
time_hours = time_now.strftime("%I").lstrip('0')
time_minutes = time_now.strftime("%M")
ampm = time_now.strftime("%p").lower()
return "%s:%s %s" % (time_hours, time_minutes, ampm)
def get_date_now():
date_now = datetime.now()
day = date_now.strftime("%A")
day_number = convert_daynum_to_friendly(date_now.strftime("%d"))
month_name = date_now.strftime("%B")
formatted_date = "%s %s %s" % (day, day_number, month_name)
return formatted_date
def get_now_zulu():
date_now = datetime.now()
zulu_date = "%sZ" % date_now.strftime("%Y-%m-%d")
return zulu_date
def convert_zulu_to_dayname(day):
dayname_from_zulu = datetime.strptime(day[:-1], "%Y-%m-%d").strftime("%a")
return dayname_from_zulu | mit | 8,256,574,304,943,224,000 | 25.957447 | 78 | 0.597946 | false |
maferelo/saleor | saleor/discount/migrations/0002_voucher.py | 2 | 3498 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-04 17:07
from __future__ import unicode_literals
import datetime
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("product", "0010_auto_20160129_0826"),
("discount", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Voucher",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"type",
models.CharField(
choices=[
("product", "Product"),
("category", "Category"),
("shipping", "Shipping"),
("basket", "Baskets over"),
],
max_length=20,
),
),
("name", models.CharField(blank=True, max_length=255, null=True)),
("code", models.CharField(db_index=True, max_length=12, unique=True)),
(
"usage_limit",
models.PositiveIntegerField(
blank=True, help_text="Unlimited if empty", null=True
),
),
("used", models.PositiveIntegerField(default=0, editable=False)),
("start_date", models.DateField(default=datetime.date.today)),
(
"end_date",
models.DateField(
blank=True, help_text="Never expire if empty", null=True
),
),
(
"discount_value_type",
models.CharField(
choices=[
("fixed", settings.DEFAULT_CURRENCY),
("percentage", "%"),
],
default="fixed",
max_length=10,
),
),
(
"discount_value",
models.DecimalField(decimal_places=2, max_digits=12),
),
("apply_to", models.CharField(blank=True, max_length=20, null=True)),
(
"limit",
models.DecimalField(
blank=True, decimal_places=2, max_digits=12, null=True
),
),
(
"category",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="product.Category",
),
),
(
"product",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="product.Product",
),
),
],
)
]
| bsd-3-clause | 39,567,817,853,150,090 | 33.294118 | 86 | 0.368782 | false |
Dturati/projetoUFMT | estagio/estagio/base/urls.py | 1 | 2567 | from django.conf.urls import url,include
from estagio.base.views import home,\
contatos,\
lista_em_arvore,\
download,\
view_compacta_pesquisa_selecionada,\
baixar_pesquisa,\
exemplo,\
view_compacta_toda_pesquisa,\
exemplo_assinc,\
get_resultado,\
define_sessao,\
requisicao_enviada,\
status_stak_celery,\
cancelar_requisicao,\
fila_celery,\
upload,\
gerarGrafico,\
sincroniza_dados,\
DownloadUpload,\
cancela_requisicao_upload,\
status_requisicao_upload
app_name = 'thegaps'
urlpatterns = [
    url(r'^home/$',home, name='home'), # URL for the home view
    url(r'^ajax/lista_diretorios/$',lista_em_arvore, name='lista_diretorios'), # URL for the lista_em_arvore view
    url(r'^contatos/$',contatos, name='contatos'), # URL for the contatos view
    url(r'^home/download/(?P<path>.*)/$',download, name='download'), # URL for the download view
    url(r'^ajax/compacta_pesquisa/$',view_compacta_pesquisa_selecionada, name='view_compacta_pesquisa_selecionada'), # URL for the view_compacta_pesquisa_selecionada view
    url(r'^ajax/compacta_toda_pesquisa/$',view_compacta_toda_pesquisa, name='view_compacta_toda_pesquisa'), # URL for the view_compacta_toda_pesquisa view
    url(r'^baixar_pesquisa/$',baixar_pesquisa, name='baixar_pesquisa'), # URL for the baixar_pesquisa view
    url(r'^exemplo/$',exemplo, name='exemplo'), # URL for the exemplo view
    url(r'^websocket/$',exemplo_assinc, name='websocket'), # URL for the exemplo_assinc view
    url(r'^ajax/resultado/$',get_resultado, name='resultado'), # URL for the get_resultado view
    url(r'^ajax/define_sessao/$',define_sessao, name='define_sessao'), # URL for the define_sessao view
    url(r'^requisicao_enviada/$',requisicao_enviada, name='requisicao_enviada'), # URL for the requisicao_enviada view
url(r'^requisicao_enviada/ajax/status_stak_celery/$',status_stak_celery,name='status_stak_celery'),
url(r'^requisicao_enviada/ajax/fila_celery/$',fila_celery,name='fila_celery'),
url(r'^upload/ajax/fila_celery/$',fila_celery,name='fila_celery_upload'),
url(r'^ajax/cancelar_requisicao',cancelar_requisicao,name='cancelar_requisicao'),
url(r'^upload/$',upload,name='upload'),
url(r'^ajax/gerar_grafico/$',gerarGrafico,name='gerar_grafico'),
url(r'^sincroniza_dados/$',sincroniza_dados,name='sincroniza_dados'),
url(r'^download_upload/(?P<file>.*)/$',DownloadUpload,name='download_upload'),
url(r'^ajax/cancela_requisicao_upload/$',cancela_requisicao_upload,name='cancela_requisicao_upload'),
url(r'^ajax/status_requisicao_upload/$',status_requisicao_upload,name='status_requisicao_upload'),
url(r'^$',home, name='home'),
]
| mit | 4,855,286,763,664,258,000 | 49.333333 | 138 | 0.698091 | false |
mdavidsaver/pyDevSup | archApp/bplreport.py | 1 | 4059 | # -*- coding: utf-8 -*-
import time, sched, urllib2, json
from devsup.db import IOScanListBlock
from devsup.hooks import initHook
from devsup.util import StoppableThread
class BPLReport(object):
reports = {}
def __init__(self, name, url, period):
self.name = name
self.url, self.period = url, period
self.result = None
self.reports[name] = self
self.scan = IOScanListBlock()
def fail(self):
self.result = None
def process(self):
self.result = None
R = urllib2.urlopen(self.url, timeout=3)
try:
if R.getcode()!=200:
print 'Fail',R.getcode(), self.url
self.result = None
return
self.result = json.load(R)
except:
print 'Error fetching',self.url
import traceback
traceback.print_exc()
finally:
R.close()
self.result_time = time.time()
self.scan.interrupt(reason = self.result)
add = BPLReport
class ReportRunner(StoppableThread):
class _Done(Exception):
pass
def _sleep(self, time):
if not self.sleep(time):
raise self._Done()
def _proc(self, R):
self._S.enter(R.period, 0, self._proc, (R,))
try:
R.process()
except:
print 'Error in processing',R.url
import traceback
traceback.print_exc()
R.fail()
def run(self):
self._S = S = sched.scheduler(time.time, self._sleep)
for R in BPLReport.reports.itervalues():
S.enter(0, 0, self._proc, (R,))
try:
S.run()
except self._Done:
print 'BPL worker exit'
except:
print 'Error in scheduler'
import traceback
traceback.print_exc()
_worker = ReportRunner()
@initHook("AfterIocRunning")
def _startWorker():
_worker.start()
print 'BPL worker started'
@initHook("AtIocExit")
def _stopWorker():
print 'BPL worker stopping'
_worker.join()
print 'BPL worker stopped'
class ReportItem(object):
raw = True
def __init__(self, rec, args):
# "<operation> <report>.<index>.<attribute> "
opname, src = args.split(None,2)[:2]
self.report, self.idx, self.attrib = src.split('.',2)
self.idx = int(self.idx)
self.R = BPLReport.reports[self.report]
self.allowScan = self.R.scan.add
self.process = getattr(self, 'process_'+opname)
def detach(self, rec):
pass
def process_fetch_float(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = float(str(R[self.idx][self.attrib]).translate(None,','))
except KeyError:
pass
else:
invalid = False
rec.UDF = invalid
rec.setTime(self.R.result_time)
def process_fetch_int(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = int(R[self.idx][self.attrib])
except KeyError:
pass
else:
invalid = False
rec.UDF = invalid
rec.setTime(self.R.result_time)
def process_fetch_string(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = R[self.idx][self.attrib].encode('ascii')
except KeyError:
pass
else:
invalid = False
if invalid:
rec.setSevr() # default is INVALID_ALARM
rec.setTime(self.R.result_time)
def process_fetch_length(self, rec, reason=None):
if self.R.result is not None:
rec.VAL = len(self.R.result)
rec.UDF = self.R.result is None
rec.setTime(self.R.result_time)
build = ReportItem
| gpl-2.0 | 2,099,237,936,138,473,700 | 25.880795 | 82 | 0.539295 | false |
icisneros/uav_landing | OtherssCode/Precland-master_moreRecent/PrecisionLand.py | 1 | 8500 | #!/usr/bin/python
#SYSTEM IMPORTS
import math
import time
import cv2
import Queue
import os
import inspect
import sys
import numpy as np
from copy import copy
#Add script directory to path
script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # script directory
sys.path.append(script_dir)
#COMMOM IMPORTS
from Common.VN_config import VN_config
from Common.VN_video import VN_video
from Common.VN_dispatcher import VN_dispatcher
from Common.VN_logger import VN_logger
from Common.VN_util import *
from Common.VN_position_vector import PositionVector
from Common.VN_vehicle_control import veh_control
#PRECISION LAND IMPORTS
from PrecisionLand_lib.PL_gui import PrecisionLandGUI as gui
from PrecisionLand_lib.PL_sim import sim
from PrecisionLand_lib.CircleDetector import CircleDetector
from PrecisionLand_lib.topcode import TopCode
#DRONEAPI IMPORTS
from droneapi.lib import VehicleMode, Location, Attitude
'''
Temporary Changes:
-added kill_camera(commented out)
'''
'''
TODO:
-Add timestamp/sync
-update config file
Future:
-add parameter set over mavlink
Improvements:
-fix config file naming/loading(make it more intelligent)
-add better target_detected logic(multiple frames required for a lock)
-add update rate to VN_logger
-fix Logging printing to console
-handle droneapi startup better (location being null at startup causes issues when using inside_landing_area() right at startup)
'''
class PrecisionLand(object):
def __init__(self):
#load config file
VN_config.get_file('Smart_Camera')
#get camera specs
self.camera_index = VN_config.get_integer('camera','camera_index',0)
self.camera_width = VN_config.get_integer('camera', 'camera_width', 640)
self.camera_height = VN_config.get_integer('camera', 'camera_height', 480)
self.camera_hfov = VN_config.get_float('camera', 'horizontal-fov', 72.42)
self.camera_vfov = VN_config.get_float('camera', 'vertical-fov', 43.3)
#simulator
self.simulator = VN_config.get_boolean('simulator','use_simulator',True)
self.target_file = VN_config.get_string('simulator', 'target_location', os.environ['HOME'] + '/visnav/target.jpg')
self.target_size = VN_config.get_float('algorithm', 'outer_ring', 1.0)
#Run the program no matter what mode or location; Useful for debug purposes
self.always_run = VN_config.get_boolean('general', 'always_run', True)
#how many frames have been captured
self.frames_captured = 0
#debugging:
self.kill_camera = False
def name(self):
return "Precision_Land"
def connect(self):
while(veh_control.is_connected() == False):
# connect to droneapi
veh_control.connect(local_connect())
self.vehicle = veh_control.get_vehicle()
def run(self):
VN_logger.text(VN_logger.GENERAL, 'Running {0}'.format(self.name()))
#start a video capture
if(self.simulator):
VN_logger.text(VN_logger.GENERAL, 'Using simulator')
sim.load_target(self.target_file, self.target_size)
sim.set_target_location(veh_control.get_home(True))
#sim.set_target_location(Location(0,0,0))
else:
VN_video.start_capture(self.camera_index)
#create an image processor
#detector = CircleDetector()
detector = TopCode()
#create a queue for images
imageQueue = []
        #initialize autopilot variables
location = Location(0,0,0)
attitude = Attitude(0,0,0)
while veh_control.is_connected():
'''
#kill camera for testing
if(cv2.waitKey(2) == 1113938):
self.kill_camera = not self.kill_camera
'''
#we are in the landing zone or in a landing mode and we are still running the landing program
#just because the program is running does not mean it controls the vehicle
#i.e. in the landing area but not in a landing mode
#FIXME add inside_landing_area() back to conditional
if (self.always_run) or (veh_control.get_mode() == "LAND") or (veh_control.get_mode() == "RTL"):
#update how often we dispatch a command
VN_dispatcher.calculate_dispatch_schedule()
#update simulator
if(self.simulator):
#get info from autopilot
location = veh_control.get_location()
attitude = veh_control.get_attitude()
sim.refresh_simulator(location,attitude)
# grab an image
frame = None
capStart = current_milli_time()
if(self.simulator):
frame = sim.get_frame()
else:
frame = VN_video.get_image()
capStop = current_milli_time()
'''
if(self.kill_camera):
frame[:] = (0,255,0)
'''
#update capture time
VN_dispatcher.update_capture_time(capStop-capStart)
#Process image
#We schedule the process as opposed to waiting for an available core
                #This brings consistency and prevents overwriting a dead process before
#information has been grabbed from the Pipe
if VN_dispatcher.is_ready():
#queue the image for later use: displaying image, overlays, recording
imageQueue.append((frame,self.frames_captured))
#the function must be run directly from the class
VN_dispatcher.dispatch(target=detector.analyze_frame_async, args=(frame,self.frames_captured))
self.frames_captured += 1
                #retrieve results
if VN_dispatcher.is_available():
#results of image processor
results = VN_dispatcher.retreive()
# get image that was passed with the image processor
for f in imageQueue:
img, frame_id = f
if results[0] == frame_id:
imageQueue.remove(f)
break
VN_logger.text(VN_logger.GENERAL, 'Frame {0}'.format(frame_id))
#overlay gui
#rend_Image = gui.add_target_highlights(img, results[3])
rend_Image = gui.add_ring_highlights(img, results[4])
#show/record images
VN_logger.image(VN_logger.RAW, img)
VN_logger.image(VN_logger.GUI, rend_Image)
#display/log data
VN_logger.text(VN_logger.ALGORITHM,'RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.format(results[1],results[2],results[3],results[4]))
VN_logger.text(VN_logger.DEBUG, 'Rings detected: {0}'.format(len(results[4])))
#send results if we found the landing pad
if(results[2] is not None):
#shift origin to center of the image
x_pixel = results[2][0] - (self.camera_width/2.0)
y_pixel = results[2][1] - (self.camera_height/2.0) #y-axis is inverted??? Works with arducopter
#convert target location to angular radians
x_angle = x_pixel * (self.camera_hfov / self.camera_width) * (math.pi/180.0)
y_angle = y_pixel * (self.camera_vfov / self.camera_height) * (math.pi/180.0)
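                        # Illustrative numbers (assuming the default 640x480 camera and
                        # 72.42 deg hfov): a target 100 px right of centre gives
                        # x_angle = 100 * (72.42 / 640) * pi / 180 ~= 0.198 rad.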
#send commands to autopilot
veh_control.report_landing_target(x_angle, y_angle, results[3],0,0)
else:
VN_logger.text(VN_logger.GENERAL, 'Not in landing mode')
#terminate program
VN_logger.text(VN_logger.GENERAL, 'Vehicle disconnected, Program Terminated')
if(self.simulator == False):
VN_video.stop_capture()
# if starting from mavproxy
if __name__ == "__builtin__":
# start precision landing
strat = PrecisionLand()
# connect to droneapi
VN_logger.text(VN_logger.GENERAL, 'Connecting to vehicle...')
strat.connect()
VN_logger.text(VN_logger.GENERAL, 'Vehicle connected!')
# run strategy
strat.run()
| mit | -4,166,947,670,528,823,300 | 33.274194 | 164 | 0.593176 | false |
FATSLiM/fatslim | docs/sphinx-src/conf.py | 1 | 8359 | # -*- coding: utf-8 -*-
#
# FATSLiM documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 17:28:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
import fatslimlib
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FATSLiM'
author_list = fatslimlib.__authors__
authors = u", ".join(author_list)
copyright = u'2016, ' + authors
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = fatslimlib.__version__
# The full version, including alpha/beta/rc tags.
release = fatslimlib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FATSLiMdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FATSLiM.tex', u'FATSLiM Documentation',
author_list, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fatslim', u'FATSLiM Documentation',
authors, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FATSLiM', u'FATSLiM Documentation',
author_list, 'FATSLiM', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | 8,787,420,351,984,867,000 | 30.424812 | 79 | 0.706783 | false |
alt-core/sheetbot | tests/plugin/test_twilio.py | 1 | 9414 | # coding: utf-8
from __future__ import absolute_import
import unittest
from webtest import TestApp
import os, sys, subprocess, json
gcloud_info = json.loads(subprocess.check_output(['gcloud', 'info', '--format=json']))
sdk_path = os.path.join(gcloud_info["installation"]["sdk_root"], 'platform', 'google_appengine')
sys.path.append(sdk_path)
sys.path.append(os.path.join(sdk_path, 'lib', 'yaml', 'lib'))
sys.path.insert(0, './lib')
#sys.path.append(os.path.join(sdk_path, 'platform/google_appengine/lib'))
from google.appengine.ext import testbed
tb = testbed.Testbed()
tb.activate()
#tb.init_datastore_v3_stub()
#tb.init_memcache_stub()
#tb.init_app_identity_stub()
tb.init_all_stubs()
#tb.deactivate()
BOT_SETTINGS = {
'OPTIONS': {
'api_token': u'test_api_token',
'reset_keyword': u'強制リセット'
},
'PLUGINS': {
'twilio': {},
'google_sheets': {},
},
'BOTS': {
'testbot': {
'interfaces': [{
'type': 'twilio',
'params': {
'twilio_sid': '<<TWILIO_SID>>',
'twilio_auth_token': '<<TWILIO_AUTH_TOKEN>>',
'dial_from': '<<TEL_FOR_DIAL>>',
'sms_from': '<<TEL_FOR_SMS_SEND>>',
}
}],
'scenario': {
'type': 'google_sheets',
'params': {
'sheet_id': "<<sheet_id>>",
'key_file_json': 'path_to_keyfile_sheets_prod.json',
}
}
},
'testbot2': {
'interfaces': [{
'type': 'twilio',
'params': {
'twilio_sid': '<<TWILIO_SID>>',
'twilio_auth_token': '<<TWILIO_AUTH_TOKEN>>',
'dial_from': '<<TEL_FOR_DIAL>>',
'sms_from': '<<TEL_FOR_SMS_SEND>>',
}
}],
'scenario': {
'type': 'google_sheets',
'params': {
'sheet_id': "<<sheet_id>>",
'key_file_json': 'path_to_keyfile_sheets_prod.json',
}
}
},
}
}
import settings
import main
import common_commands
import auth
import webapi
import plugin.twilio.webapi
from scenario import ScenarioBuilder
from tests.test import reinitialize_bot, DummyScenarioLoader, dummy_send_request_factory
class TwilioPluginTestCase(unittest.TestCase):
def setUp(self):
reinitialize_bot(BOT_SETTINGS)
self.app_twilio = TestApp(plugin.twilio.webapi.app)
self.app = TestApp(webapi.app)
common_commands.send_request = dummy_send_request_factory(self, self.app)
self.test_bot_loader = DummyScenarioLoader()
self.test_bot = main.get_bot('testbot')
self.test_bot.scenario_loader = self.test_bot_loader
self.test_bot2_loader = DummyScenarioLoader()
self.test_bot2 = main.get_bot('testbot2')
self.test_bot2.scenario_loader = self.test_bot2_loader
self.forwarded_messages = []
import bottle
bottle.debug(True)
def tearDown(self):
pass
def send_twilio_reset(self, from_tel):
message = settings.OPTIONS['reset_keyword']
res = self.app_twilio.post('/twilio/callback/testbot?token=' + auth.api_token, {'From': from_tel, 'To': '+815000000000', 'Body': message.encode('utf-8'), 'MessageSid': 'MEffffffffffffffffffffffffffffffff'})
return res
def send_twilio_call(self, from_tel, to_tel):
res = self.app_twilio.post('/twilio/callback/testbot?token=' + auth.api_token, {'From': from_tel, 'To': to_tel, 'CallSid': 'CAffffffffffffffffffffffffffffffff'})
return res
def send_twilio_message(self, from_tel, to_tel, message):
res = self.app_twilio.post('/twilio/callback/testbot?token=' + auth.api_token, {'From': from_tel, 'To': to_tel, 'Body': message.encode('utf-8'), 'MessageSid': 'MEffffffffffffffffffffffffffffffff'})
return res
def send_twilio_api_send_group(self, group, action):
res = self.app.get('/api/v1/bots/testbot/action?user=group:'+group+'&action='+action+'&token='+auth.api_token)
self.assertEqual(res.status, "200 OK")
self.assertEqual(res.headers["Content-Type"], u"text/plain; charset=UTF-8")
res_json = json.loads(res.text)
self.assertEqual(res_json[u"code"], 200)
self.assertEqual(res_json[u"result"], u"Success")
return res_json[u"message"]
def send_twilio_api_send_uid(self, from_tel, action):
res = self.app.get('/api/v1/bots/testbot/action?user=twilio:'+from_tel+'&action='+action+'&token='+auth.api_token)
self.assertEqual(res.status, "200 OK")
self.assertEqual(res.headers["Content-Type"], u"text/plain; charset=UTF-8")
res_json = json.loads(res.text)
self.assertEqual(res_json[u"code"], 200)
self.assertEqual(res_json[u"result"], u"Success")
return res_json[u"message"]
def assert_twilio_response_body(self, body, response_string):
self.assertEqual(body, u'<?xml version="1.0" encoding="UTF-8"?><Response>{}</Response>'.format(response_string))
def assert_twilio_response(self, res, response_string):
self.assertEqual(str(res).decode('utf-8'), u'Response: 200 OK\nContent-Type: text/xml; charset=UTF-8\n<?xml version="1.0" encoding="UTF-8"?><Response>{}</Response>'.format(response_string))
def assert_twilio_single_message(self, res, message):
self.assert_twilio_response(res, u'<Message>{}</Message>'.format(message))
def test_twilio(self):
self.test_bot.scenario = ScenarioBuilder.build_from_tables([
(u'default', [
[u'#tel:+815011111111', u'電話されました'],
[u'#tel:+815022222222', u'<Reject reason="rejected"></Reject>'],
[u'テキストメッセージ', u'SMSを受信しました'],
]),
])
res = self.send_twilio_call(u'+819012345678', u'+815011111111')
self.assert_twilio_response(res, u'<Say language="ja-jp" voice="woman">電話されました</Say>')
res = self.send_twilio_call(u'+819012345678', u'+815022222222')
self.assert_twilio_response(res, u'<Reject reason="rejected"></Reject>')
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'テキストメッセージ')
self.assert_twilio_single_message(res, u'SMSを受信しました')
def test_forward(self):
self.test_bot.scenario = ScenarioBuilder.build_from_tables([
(u'default', [
[u'forward', u'@転送', u'testbot2', u'#転送'],
[u'', u'転送をいたしました。XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'],
]),
])
self.test_bot2.scenario = ScenarioBuilder.build_from_tables([
(u'default', [
[u'#転送', u'転送されてきました。'],
]),
])
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'forward')
self.assert_twilio_response_body(res.text, u'<Message>転送をいたしました。XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX</Message>')
self.assertEqual(len(self.forwarded_messages), 1)
self.assert_twilio_response_body(self.forwarded_messages[0], u'<Message>転送されてきました。</Message>')
def test_api_send(self):
self.test_bot.scenario = ScenarioBuilder.build_from_tables([
(u'default', [
[u'group_add', u'@group_add', u'all'],
[u'', u'追加しました。'],
[u'group_del', u'@group_del', u'all'],
[u'', u'削除しました。'],
[u'group_clear', u'@group_clear', u'all'],
[u'', u'クリアしました。'],
[u'from_api', u'WebAPI経由での呼び出し'],
]),
])
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'group_add')
self.assert_twilio_single_message(res, u"追加しました。")
body = self.send_twilio_api_send_group(u'all', u'from_api')
self.assert_twilio_response_body(body, u"<Message>WebAPI経由での呼び出し</Message>")
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'group_del')
self.assert_twilio_single_message(res, u"削除しました。")
body = self.send_twilio_api_send_group(u'all', u'from_api')
self.assertEqual(body, u'')
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'group_add')
self.assert_twilio_single_message(res, u"追加しました。")
body = self.send_twilio_api_send_group(u'all', u'from_api')
self.assert_twilio_response_body(body, u"<Message>WebAPI経由での呼び出し</Message>")
res = self.send_twilio_message(u'+819012345678', u'+815011111111', u'group_clear')
self.assert_twilio_single_message(res, u"クリアしました。")
body = self.send_twilio_api_send_group(u'all', u'from_api')
self.assertEqual(body, u'')
body = self.send_twilio_api_send_uid(u'+819012345678', u'from_api')
self.assert_twilio_response_body(body, u"<Message>WebAPI経由での呼び出し</Message>")
if __name__ == '__main__':
unittest.main()
| mit | 8,377,975,937,539,998,000 | 41.900474 | 214 | 0.590035 | false |
m3z/HT | openstack_dashboard/dashboards/project/images_and_snapshots/images/tests.py | 1 | 4704 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
IMAGES_INDEX_URL = reverse('horizon:project:images_and_snapshots:index')
class ImageViewTests(test.TestCase):
def test_image_create_get(self):
url = reverse('horizon:project:images_and_snapshots:images:create')
res = self.client.get(url)
self.assertTemplateUsed(res,
'project/images_and_snapshots/images/create.html')
@test.create_stubs({api.glance: ('image_create',)})
def test_image_create_post(self):
data = {
'name': u'Ubuntu 11.10',
'copy_from': u'http://cloud-images.ubuntu.com/releases/'
u'oneiric/release/ubuntu-11.10-server-cloudimg'
u'-amd64-disk1.img',
'disk_format': u'qcow2',
'minimum_disk': 15,
'minimum_ram': 512,
'is_public': 1,
'method': 'CreateImageForm'}
api.glance.image_create(IsA(http.HttpRequest),
container_format="bare",
copy_from=data['copy_from'],
disk_format=data['disk_format'],
is_public=True,
min_disk=data['minimum_disk'],
min_ram=data['minimum_ram'],
name=data['name']). \
AndReturn(self.images.first())
self.mox.ReplayAll()
url = reverse('horizon:project:images_and_snapshots:images:create')
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_detail_get(self):
image = self.images.first()
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(self.images.first())
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:project:images_and_snapshots:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'project/images_and_snapshots/images/detail.html')
self.assertEqual(res.context['image'].name, image.name)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_detail_get_with_exception(self):
image = self.images.first()
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndRaise(self.exceptions.glance)
self.mox.ReplayAll()
url = reverse('horizon:project:images_and_snapshots:images:detail',
args=[image.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
@test.create_stubs({api: ('image_get',)})
def test_image_update_get(self):
image = self.images.first()
image.disk_format = "ami"
image.is_public = True
api.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:project:images_and_snapshots:images:update',
args=[image.id]))
self.assertTemplateUsed(res,
'project/images_and_snapshots/images/_update.html')
self.assertEqual(res.context['image'].name, image.name)
# Bug 1076216 - is_public checkbox not being set correctly
self.assertContains(res, "<input type='checkbox' id='id_public'"
" name='public' checked='checked'>",
html=True,
msg_prefix="The is_public checkbox is not checked")
| apache-2.0 | -8,853,933,789,205,918,000 | 38.864407 | 79 | 0.590136 | false |
dims/cinder | cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py | 1 | 46513 | # Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp block storage library
"""
import copy
import uuid
import mock
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
class NetAppBlockStorageLibraryTestCase(test.TestCase):
def setUp(self):
super(NetAppBlockStorageLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_base()}
self.library = block_base.NetAppBlockStorageLibrary(
'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.mock_request = mock.Mock()
def tearDown(self):
super(NetAppBlockStorageLibraryTestCase, self).tearDown()
def get_config_base(self):
return na_fakes.create_configuration()
    def test_get_reserved_percentage_default_multiplier(self):
default = 1.2
reserved_percentage = 20.0
self.library.configuration.netapp_size_multiplier = default
self.library.configuration.reserved_percentage = reserved_percentage
self.mock_object(block_base, 'LOG')
result = self.library._get_reserved_percentage()
self.assertEqual(reserved_percentage, result)
self.assertFalse(block_base.LOG.warn.called)
def test_get_reserved_percentage(self):
multiplier = 2.0
self.library.configuration.netapp_size_multiplier = multiplier
self.mock_object(block_base, 'LOG')
result = self.library._get_reserved_percentage()
reserved_ratio = round(1 - (1 / multiplier), 2)
reserved_percentage = 100 * int(reserved_ratio)
self.assertEqual(reserved_percentage, result)
self.assertTrue(block_base.LOG.warn.called)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'}))
def test_get_pool(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual('FAKE_CMODE_VOL1', pool)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value=None))
def test_get_pool_no_metadata(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertIsNone(pool)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr',
mock.Mock(return_value=dict()))
def test_get_pool_volume_unknown(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertIsNone(pool)
def test_create_volume(self):
volume_size_in_bytes = int(fake.SIZE) * units.Gi
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(na_utils, 'log_extra_spec_warnings')
self.mock_object(block_base, 'LOG')
self.mock_object(volume_utils, 'extract_host', mock.Mock(
return_value=fake.POOL_NAME))
self.mock_object(self.library, '_setup_qos_for_volume',
mock.Mock(return_value=None))
self.mock_object(self.library, '_create_lun')
self.mock_object(self.library, '_create_lun_handle')
self.mock_object(self.library, '_add_lun_to_table')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.create_volume(fake.VOLUME)
self.library._create_lun.assert_called_once_with(
fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes,
fake.LUN_METADATA, None)
self.assertEqual(0, self.library.
_mark_qos_policy_group_for_deletion.call_count)
self.assertEqual(0, block_base.LOG.error.call_count)
def test_create_volume_no_pool(self):
self.mock_object(volume_utils, 'extract_host', mock.Mock(
return_value=None))
self.assertRaises(exception.InvalidHost, self.library.create_volume,
fake.VOLUME)
def test_create_volume_exception_path(self):
self.mock_object(block_base, 'LOG')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.library, '_setup_qos_for_volume',
mock.Mock(return_value=None))
self.mock_object(self.library, '_create_lun', mock.Mock(
side_effect=Exception))
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.assertRaises(exception.VolumeBackendAPIException,
self.library.create_volume, fake.VOLUME)
self.assertEqual(1, self.library.
_mark_qos_policy_group_for_deletion.call_count)
self.assertEqual(1, block_base.LOG.exception.call_count)
def test_create_volume_no_pool_provided_by_scheduler(self):
fake_volume = copy.deepcopy(fake.VOLUME)
# Set up fake volume whose 'host' field is missing pool information.
fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
self.assertRaises(exception.InvalidHost, self.library.create_volume,
fake_volume)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
self.library.host_type = 'linux'
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
self.zapi_client.map_lun.return_value = '1'
lun_id = self.library._map_lun('fake_volume',
fake.FC_FORMATTED_INITIATORS,
protocol, None)
self.assertEqual('1', lun_id)
mock_get_or_create_igroup.assert_called_once_with(
fake.FC_FORMATTED_INITIATORS, protocol, os)
self.zapi_client.map_lun.assert_called_once_with(
fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
@mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
@mock.patch.object(block_base, 'LOG', mock.Mock())
def test_map_lun_mismatch_host_os(
self, mock_get_or_create_igroup, mock_get_lun_attr):
os = 'windows'
protocol = 'fcp'
self.library.host_type = 'linux'
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
self.library._map_lun('fake_volume',
fake.FC_FORMATTED_INITIATORS,
protocol, None)
mock_get_or_create_igroup.assert_called_once_with(
fake.FC_FORMATTED_INITIATORS, protocol,
self.library.host_type)
self.zapi_client.map_lun.assert_called_once_with(
fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
self.assertEqual(1, block_base.LOG.warning.call_count)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
lun_id = self.library._map_lun(
'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)
self.assertEqual('2', lun_id)
mock_find_mapped_lun_igroup.assert_called_once_with(
fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_or_create_igroup')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_map_lun_api_error(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
'iscsi')
mock_find_mapped_lun_igroup.return_value = (None, None)
self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
self.assertRaises(netapp_api.NaApiError, self.library._map_lun,
'fake_volume', fake.FC_FORMATTED_INITIATORS,
protocol, None)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_find_mapped_lun_igroup')
def test_unmap_lun(self, mock_find_mapped_lun_igroup):
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)
self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)
self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH,
fake.IGROUP1_NAME)
def test_find_mapped_lun_igroup(self):
self.assertRaises(NotImplementedError,
self.library._find_mapped_lun_igroup,
fake.LUN_PATH,
fake.FC_FORMATTED_INITIATORS)
def test_has_luns_mapped_to_initiators(self):
self.zapi_client.has_luns_mapped_to_initiators.return_value = True
self.assertTrue(self.library._has_luns_mapped_to_initiators(
fake.FC_FORMATTED_INITIATORS))
self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
fake.FC_FORMATTED_INITIATORS)
def test_get_or_create_igroup_preexisting(self):
self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1]
self.library._create_igroup_add_initiators = mock.Mock()
igroup_name, host_os, ig_type = self.library._get_or_create_igroup(
fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')
self.assertEqual(fake.IGROUP1_NAME, igroup_name)
self.assertEqual('linux', host_os)
self.assertEqual('fcp', ig_type)
self.zapi_client.get_igroup_by_initiators.assert_called_once_with(
fake.FC_FORMATTED_INITIATORS)
self.assertEqual(
0, self.library._create_igroup_add_initiators.call_count)
@mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1))
def test_get_or_create_igroup_none_preexisting(self):
"""This method also tests _create_igroup_add_initiators."""
self.zapi_client.get_igroup_by_initiators.return_value = []
igroup_name, os, ig_type = self.library._get_or_create_igroup(
fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')
self.assertEqual('openstack-' + fake.UUID1, igroup_name)
self.zapi_client.create_igroup.assert_called_once_with(
igroup_name, 'fcp', 'linux')
self.assertEqual(len(fake.FC_FORMATTED_INITIATORS),
self.zapi_client.add_igroup_initiator.call_count)
self.assertEqual('linux', os)
self.assertEqual('fcp', ig_type)
def test_get_fc_target_wwpns(self):
self.assertRaises(NotImplementedError,
self.library._get_fc_target_wwpns)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_map_lun')
def test_initialize_connection_fc(self, mock_map_lun,
mock_build_initiator_target_map):
self.maxDiff = None
mock_map_lun.return_value = '1'
mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
fake.FC_I_T_MAP, 4)
target_info = self.library.initialize_connection_fc(fake.FC_VOLUME,
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO)
mock_map_lun.assert_called_once_with(
'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_map_lun')
def test_initialize_connection_fc_no_wwpns(
self, mock_map_lun, mock_build_initiator_target_map):
mock_map_lun.return_value = '1'
mock_build_initiator_target_map.return_value = (None, None, 0)
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_fc,
fake.FC_VOLUME,
fake.FC_CONNECTOR)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators):
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
mock_unmap_lun.return_value = None
mock_has_luns_mapped_to_initiators.return_value = True
target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY)
mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
fake.FC_FORMATTED_INITIATORS)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_build_initiator_target_map')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_has_luns_mapped_to_initiators')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_unmap_lun')
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_lun_attr')
def test_terminate_connection_fc_no_more_luns(
self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators,
mock_build_initiator_target_map):
mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
mock_unmap_lun.return_value = None
mock_has_luns_mapped_to_initiators.return_value = False
mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
fake.FC_I_T_MAP, 4)
target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
fake.FC_CONNECTOR)
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_fc_target_wwpns')
def test_build_initiator_target_map_no_lookup_service(
self, mock_get_fc_target_wwpns):
self.library.lookup_service = None
mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS
(target_wwpns, init_targ_map, num_paths) = \
self.library._build_initiator_target_map(fake.FC_CONNECTOR)
self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map)
self.assertEqual(0, num_paths)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_fc_target_wwpns')
def test_build_initiator_target_map_with_lookup_service(
self, mock_get_fc_target_wwpns):
self.library.lookup_service = mock.Mock()
self.library.lookup_service.get_device_mapping_from_network.\
return_value = fake.FC_FABRIC_MAP
mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS
(target_wwpns, init_targ_map, num_paths) = \
self.library._build_initiator_target_map(fake.FC_CONNECTOR)
self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
self.assertEqual(4, num_paths)
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup_san_configured(self, mock_check_flags):
self.library.configuration.netapp_lun_ostype = 'windows'
self.library.configuration.netapp_host_type = 'solaris'
self.library.configuration.netapp_lun_space_reservation = 'disabled'
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertEqual('windows', self.library.lun_ostype)
self.assertEqual('solaris', self.library.host_type)
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup_san_unconfigured(self, mock_check_flags):
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.configuration.netapp_lun_space_reservation = 'enabled'
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertEqual('linux', self.library.lun_ostype)
self.assertEqual('linux', self.library.host_type)
def test_do_setup_space_reservation_disabled(self):
self.mock_object(na_utils, 'check_flags')
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.configuration.netapp_lun_space_reservation = 'disabled'
self.library.do_setup(mock.Mock())
self.assertEqual('false', self.library.lun_space_reservation)
def test_do_setup_space_reservation_enabled(self):
self.mock_object(na_utils, 'check_flags')
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.configuration.netapp_lun_space_reservation = 'enabled'
self.library.do_setup(mock.Mock())
self.assertEqual('true', self.library.lun_space_reservation)
def test_get_existing_vol_manage_missing_id_path(self):
self.assertRaises(exception.ManageExistingInvalidReference,
self.library._get_existing_vol_with_manage_ref,
{})
def test_get_existing_vol_manage_not_found(self):
self.zapi_client.get_lun_by_args.return_value = []
self.assertRaises(exception.ManageExistingInvalidReference,
self.library._get_existing_vol_with_manage_ref,
{'source-id': 'src_id',
'source-name': 'lun_path'})
self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_extract_lun_info',
mock.Mock(return_value=block_base.NetAppLun(
'lun0', 'lun0', '3', {'UUID': 'src_id'})))
def test_get_existing_vol_manage_lun(self):
self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1']
lun = self.library._get_existing_vol_with_manage_ref(
{'source-id': 'src_id', 'path': 'lun_path'})
self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
self.library._extract_lun_info.assert_called_once_with('lun0')
self.assertEqual('lun0', lun.name)
@mock.patch.object(block_base.NetAppBlockStorageLibrary,
'_get_existing_vol_with_manage_ref',
mock.Mock(return_value=block_base.NetAppLun(
'handle', 'name', '1073742824', {})))
def test_manage_existing_get_size(self):
size = self.library.manage_existing_get_size(
{'id': 'vol_id'}, {'ref': 'ref'})
self.assertEqual(2, size)
self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
{'ref': 'ref'})
@mock.patch.object(block_base.LOG, 'info')
def test_unmanage(self, log):
mock_lun = block_base.NetAppLun('handle', 'name', '1',
{'Path': 'p', 'UUID': 'uuid'})
self.library._get_lun_from_table = mock.Mock(return_value=mock_lun)
self.library.unmanage({'name': 'vol'})
self.library._get_lun_from_table.assert_called_once_with('vol')
self.assertEqual(1, log.call_count)
def test_check_vol_type_for_lun(self):
self.assertRaises(NotImplementedError,
self.library._check_volume_type_for_lun,
'vol', 'lun', 'existing_ref', {})
def test_is_lun_valid_on_storage(self):
self.assertTrue(self.library._is_lun_valid_on_storage('lun'))
def test_initialize_connection_iscsi(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
volume = fake.ISCSI_VOLUME
connector = fake.ISCSI_CONNECTOR
self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
self.zapi_client.get_iscsi_target_details.return_value = (
target_details_list)
self.mock_object(block_base.NetAppBlockStorageLibrary,
'_get_preferred_target_from_list',
mock.Mock(return_value=target_details_list[1]))
self.zapi_client.get_iscsi_service_details.return_value = (
fake.ISCSI_SERVICE_IQN)
self.mock_object(
na_utils, 'get_iscsi_connection_properties',
mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES))
target_info = self.library.initialize_connection_iscsi(volume,
connector)
self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info)
block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with(
fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']],
'iscsi', None)
self.zapi_client.get_iscsi_target_details.assert_called_once_with()
block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\
.assert_called_once_with(
target_details_list)
self.zapi_client.get_iscsi_service_details.assert_called_once_with()
def test_initialize_connection_iscsi_no_target_list(self):
volume = fake.ISCSI_VOLUME
connector = fake.ISCSI_CONNECTOR
self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
self.zapi_client.get_iscsi_target_details.return_value = None
self.mock_object(block_base.NetAppBlockStorageLibrary,
'_get_preferred_target_from_list')
self.mock_object(
na_utils, 'get_iscsi_connection_properties',
mock.Mock(return_value=fake.ISCSI_CONNECTION_PROPERTIES))
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_iscsi,
volume, connector)
self.assertEqual(
0, block_base.NetAppBlockStorageLibrary
._get_preferred_target_from_list.call_count)
self.assertEqual(
0, self.zapi_client.get_iscsi_service_details.call_count)
self.assertEqual(
0, na_utils.get_iscsi_connection_properties.call_count)
def test_initialize_connection_iscsi_no_preferred_target(self):
volume = fake.ISCSI_VOLUME
connector = fake.ISCSI_CONNECTOR
self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
self.zapi_client.get_iscsi_target_details.return_value = None
self.mock_object(block_base.NetAppBlockStorageLibrary,
'_get_preferred_target_from_list',
mock.Mock(return_value=None))
self.mock_object(na_utils, 'get_iscsi_connection_properties')
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_iscsi,
volume, connector)
self.assertEqual(0, self.zapi_client
.get_iscsi_service_details.call_count)
self.assertEqual(0, na_utils.get_iscsi_connection_properties
.call_count)
def test_initialize_connection_iscsi_no_iscsi_service_details(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
volume = fake.ISCSI_VOLUME
connector = fake.ISCSI_CONNECTOR
self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
mock.Mock(return_value=fake.ISCSI_LUN['lun_id']))
self.zapi_client.get_iscsi_target_details.return_value = (
target_details_list)
self.mock_object(block_base.NetAppBlockStorageLibrary,
'_get_preferred_target_from_list',
mock.Mock(return_value=target_details_list[1]))
self.zapi_client.get_iscsi_service_details.return_value = None
self.mock_object(na_utils, 'get_iscsi_connection_properties')
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_iscsi,
volume,
connector)
block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with(
fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']],
'iscsi', None)
self.zapi_client.get_iscsi_target_details.assert_called_once_with()
block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\
.assert_called_once_with(target_details_list)
def test_get_target_details_list(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[0], result)
def test_get_preferred_target_from_empty_list(self):
target_details_list = []
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertIsNone(result)
def test_get_preferred_target_from_list_with_one_interface_disabled(self):
target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST)
target_details_list[0]['interface-enabled'] = 'false'
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[1], result)
def test_get_preferred_target_from_list_with_all_interfaces_disabled(self):
target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST)
for target in target_details_list:
target['interface-enabled'] = 'false'
result = self.library._get_preferred_target_from_list(
target_details_list)
self.assertEqual(target_details_list[0], result)
def test_get_preferred_target_from_list_with_filter(self):
target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
filter = [target_detail['address']
for target_detail in target_details_list[1:]]
result = self.library._get_preferred_target_from_list(
target_details_list, filter)
self.assertEqual(target_details_list[1], result)
@mock.patch.object(na_utils, 'check_flags', mock.Mock())
@mock.patch.object(block_base, 'LOG', mock.Mock())
def test_setup_error_invalid_lun_os(self):
self.library.configuration.netapp_lun_ostype = 'unknown'
self.library.do_setup(mock.Mock())
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
msg = _("Invalid value for NetApp configuration"
" option netapp_lun_ostype.")
block_base.LOG.error.assert_called_once_with(msg)
@mock.patch.object(na_utils, 'check_flags', mock.Mock())
@mock.patch.object(block_base, 'LOG', mock.Mock())
def test_setup_error_invalid_host_type(self):
self.library.configuration.netapp_lun_ostype = 'linux'
self.library.configuration.netapp_host_type = 'future_os'
self.library.do_setup(mock.Mock())
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
msg = _("Invalid value for NetApp configuration"
" option netapp_host_type.")
block_base.LOG.error.assert_called_once_with(msg)
@mock.patch.object(na_utils, 'check_flags', mock.Mock())
def test_check_for_setup_error_both_config(self):
self.library.configuration.netapp_lun_ostype = 'linux'
self.library.configuration.netapp_host_type = 'linux'
self.library.do_setup(mock.Mock())
self.zapi_client.get_lun_list.return_value = ['lun1']
self.library._extract_and_populate_luns = mock.Mock()
self.library.check_for_setup_error()
self.library._extract_and_populate_luns.assert_called_once_with(
['lun1'])
@mock.patch.object(na_utils, 'check_flags', mock.Mock())
def test_check_for_setup_error_no_os_host(self):
self.library.configuration.netapp_lun_ostype = None
self.library.configuration.netapp_host_type = None
self.library.do_setup(mock.Mock())
self.zapi_client.get_lun_list.return_value = ['lun1']
self.library._extract_and_populate_luns = mock.Mock()
self.library.check_for_setup_error()
self.library._extract_and_populate_luns.assert_called_once_with(
['lun1'])
def test_delete_volume(self):
mock_delete_lun = self.mock_object(self.library, '_delete_lun')
self.library.delete_volume(fake.VOLUME)
mock_delete_lun.assert_called_once_with(fake.LUN_NAME)
def test_delete_lun(self):
mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
mock_get_lun_attr.return_value = fake.LUN_METADATA
self.library.zapi_client = mock.Mock()
self.library.lun_table = fake.LUN_TABLE
self.library._delete_lun(fake.LUN_NAME)
mock_get_lun_attr.assert_called_once_with(
fake.LUN_NAME, 'metadata')
self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH)
def test_delete_lun_no_metadata(self):
self.mock_object(self.library, '_get_lun_attr', mock.Mock(
return_value=None))
self.library.zapi_client = mock.Mock()
self.mock_object(self.library, 'zapi_client')
self.library._delete_lun(fake.LUN_NAME)
self.library._get_lun_attr.assert_called_once_with(
fake.LUN_NAME, 'metadata')
self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count)
self.assertEqual(0,
self.zapi_client.
mark_qos_policy_group_for_deletion.call_count)
def test_delete_snapshot(self):
mock_delete_lun = self.mock_object(self.library, '_delete_lun')
self.library.delete_snapshot(fake.SNAPSHOT)
mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME)
def test_clone_source_to_destination(self):
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
return_value=fake.QOS_POLICY_GROUP_INFO))
self.mock_object(self.library, '_clone_lun')
self.mock_object(self.library, '_extend_volume')
self.mock_object(self.library, 'delete_volume')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.lun_space_reservation = 'false'
self.library._clone_source_to_destination(fake.CLONE_SOURCE,
fake.CLONE_DESTINATION)
na_utils.get_volume_extra_specs.assert_called_once_with(
fake.CLONE_DESTINATION)
self.library._setup_qos_for_volume.assert_called_once_with(
fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
self.library._clone_lun.assert_called_once_with(
fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
space_reserved='false',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
self.library._extend_volume.assert_called_once_with(
fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
fake.QOS_POLICY_GROUP_NAME)
self.assertEqual(0, self.library.delete_volume.call_count)
self.assertEqual(0, self.library.
_mark_qos_policy_group_for_deletion.call_count)
def test_clone_source_to_destination_exception_path(self):
self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
return_value=fake.EXTRA_SPECS))
self.mock_object(self.library, '_setup_qos_for_volume', mock.Mock(
return_value=fake.QOS_POLICY_GROUP_INFO))
self.mock_object(self.library, '_clone_lun')
self.mock_object(self.library, '_extend_volume', mock.Mock(
side_effect=Exception))
self.mock_object(self.library, 'delete_volume')
self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
self.library.lun_space_reservation = 'true'
self.assertRaises(exception.VolumeBackendAPIException,
self.library._clone_source_to_destination,
fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
na_utils.get_volume_extra_specs.assert_called_once_with(
fake.CLONE_DESTINATION)
self.library._setup_qos_for_volume.assert_called_once_with(
fake.CLONE_DESTINATION, fake.EXTRA_SPECS)
self.library._clone_lun.assert_called_once_with(
fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME,
space_reserved='true',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
self.library._extend_volume.assert_called_once_with(
fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE,
fake.QOS_POLICY_GROUP_NAME)
self.assertEqual(1, self.library.delete_volume.call_count)
self.assertEqual(1, self.library.
_mark_qos_policy_group_for_deletion.call_count)
def test_create_lun(self):
self.assertRaises(NotImplementedError, self.library._create_lun,
fake.VOLUME_ID, fake.LUN_ID, fake.SIZE,
fake.LUN_METADATA)
def test_clone_lun(self):
self.assertRaises(NotImplementedError, self.library._clone_lun,
fake.VOLUME_ID, 'new-' + fake.VOLUME_ID)
def test_create_volume_from_snapshot(self):
mock_do_clone = self.mock_object(self.library,
'_clone_source_to_destination')
source = {
'name': fake.SNAPSHOT['name'],
'size': fake.SNAPSHOT['volume_size']
}
self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT)
mock_do_clone.assert_has_calls([
mock.call(source, fake.VOLUME)])
def test_create_cloned_volume(self):
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
mock_get_lun_from_table = self.mock_object(self.library,
'_get_lun_from_table')
mock_get_lun_from_table.return_value = fake_lun
mock_do_clone = self.mock_object(self.library,
'_clone_source_to_destination')
source = {
'name': fake_lun.name,
'size': fake.VOLUME_REF['size']
}
self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF)
mock_do_clone.assert_has_calls([
mock.call(source, fake.VOLUME)])
def test_extend_volume(self):
new_size = 100
volume_copy = copy.copy(fake.VOLUME)
volume_copy['size'] = new_size
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=fake.EXTRA_SPECS))
mock_setup_qos_for_volume = self.mock_object(
self.library, '_setup_qos_for_volume',
mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO))
mock_extend_volume = self.mock_object(self.library, '_extend_volume')
self.library.extend_volume(fake.VOLUME, new_size)
mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
mock_setup_qos_for_volume.assert_called_once_with(volume_copy,
fake.EXTRA_SPECS)
mock_extend_volume.assert_called_once_with(fake.VOLUME,
new_size,
fake.QOS_POLICY_GROUP_NAME)
def test_extend_volume_api_error(self):
new_size = 100
volume_copy = copy.copy(fake.VOLUME)
volume_copy['size'] = new_size
mock_get_volume_extra_specs = self.mock_object(
na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=fake.EXTRA_SPECS))
mock_setup_qos_for_volume = self.mock_object(
self.library, '_setup_qos_for_volume',
mock.Mock(return_value=fake.QOS_POLICY_GROUP_INFO))
mock_extend_volume = self.mock_object(
self.library, '_extend_volume',
mock.Mock(side_effect=netapp_api.NaApiError))
self.assertRaises(netapp_api.NaApiError,
self.library.extend_volume,
fake.VOLUME,
new_size)
mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
mock_setup_qos_for_volume.assert_has_calls([
mock.call(volume_copy, fake.EXTRA_SPECS),
mock.call(fake.VOLUME, fake.EXTRA_SPECS)])
mock_extend_volume.assert_called_once_with(
fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME)
def test__extend_volume_direct(self):
current_size = fake.LUN_SIZE
current_size_bytes = current_size * units.Gi
new_size = fake.LUN_SIZE * 2
new_size_bytes = new_size * units.Gi
max_size = fake.LUN_SIZE * 10
max_size_bytes = max_size * units.Gi
fake_volume = copy.copy(fake.VOLUME)
fake_volume['size'] = new_size
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
fake.LUN_ID,
current_size_bytes,
fake.LUN_METADATA)
mock_get_lun_from_table = self.mock_object(
self.library, '_get_lun_from_table',
mock.Mock(return_value=fake_lun))
fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
mock_get_lun_geometry = self.mock_object(
self.library.zapi_client, 'get_lun_geometry',
mock.Mock(return_value=fake_lun_geometry))
mock_do_direct_resize = self.mock_object(self.library.zapi_client,
'do_direct_resize')
mock_do_sub_clone_resize = self.mock_object(self.library,
'_do_sub_clone_resize')
self.library.lun_table = {fake.VOLUME['name']: fake_lun}
self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy')
mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name'])
mock_get_lun_geometry.assert_called_once_with(
fake.LUN_METADATA['Path'])
mock_do_direct_resize.assert_called_once_with(
fake.LUN_METADATA['Path'], six.text_type(new_size_bytes))
self.assertFalse(mock_do_sub_clone_resize.called)
self.assertEqual(six.text_type(new_size_bytes),
self.library.lun_table[fake.VOLUME['name']].size)
def test__extend_volume_clone(self):
current_size = fake.LUN_SIZE
current_size_bytes = current_size * units.Gi
new_size = fake.LUN_SIZE * 20
new_size_bytes = new_size * units.Gi
max_size = fake.LUN_SIZE * 10
max_size_bytes = max_size * units.Gi
fake_volume = copy.copy(fake.VOLUME)
fake_volume['size'] = new_size
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
fake.LUN_ID,
current_size_bytes,
fake.LUN_METADATA)
mock_get_lun_from_table = self.mock_object(
self.library, '_get_lun_from_table',
mock.Mock(return_value=fake_lun))
fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
mock_get_lun_geometry = self.mock_object(
self.library.zapi_client, 'get_lun_geometry',
mock.Mock(return_value=fake_lun_geometry))
mock_do_direct_resize = self.mock_object(self.library.zapi_client,
'do_direct_resize')
mock_do_sub_clone_resize = self.mock_object(self.library,
'_do_sub_clone_resize')
self.library.lun_table = {fake.VOLUME['name']: fake_lun}
self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy')
mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name'])
mock_get_lun_geometry.assert_called_once_with(
fake.LUN_METADATA['Path'])
self.assertFalse(mock_do_direct_resize.called)
mock_do_sub_clone_resize.assert_called_once_with(
fake.LUN_METADATA['Path'], six.text_type(new_size_bytes),
qos_policy_group_name='fake_qos_policy')
self.assertEqual(six.text_type(new_size_bytes),
self.library.lun_table[fake.VOLUME['name']].size)
def test__extend_volume_no_change(self):
current_size = fake.LUN_SIZE
current_size_bytes = current_size * units.Gi
new_size = fake.LUN_SIZE
max_size = fake.LUN_SIZE * 10
max_size_bytes = max_size * units.Gi
fake_volume = copy.copy(fake.VOLUME)
fake_volume['size'] = new_size
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE,
fake.LUN_ID,
current_size_bytes,
fake.LUN_METADATA)
mock_get_lun_from_table = self.mock_object(
self.library, '_get_lun_from_table',
mock.Mock(return_value=fake_lun))
fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)}
mock_get_lun_geometry = self.mock_object(
self.library.zapi_client, 'get_lun_geometry',
mock.Mock(return_value=fake_lun_geometry))
mock_do_direct_resize = self.mock_object(self.library.zapi_client,
'do_direct_resize')
mock_do_sub_clone_resize = self.mock_object(self.library,
'_do_sub_clone_resize')
self.library.lun_table = {fake_volume['name']: fake_lun}
self.library._extend_volume(fake_volume, new_size, 'fake_qos_policy')
mock_get_lun_from_table.assert_called_once_with(fake_volume['name'])
self.assertFalse(mock_get_lun_geometry.called)
self.assertFalse(mock_do_direct_resize.called)
self.assertFalse(mock_do_sub_clone_resize.called)
| apache-2.0 | -7,212,369,763,697,224,000 | 45.513 | 79 | 0.608884 | false |
Konubinix/weboob | modules/boursorama/pages/accounts_list.py | 1 | 4273 | # -*- coding: utf-8 -*-
# Copyright(C) 2011 Gabriel Kerneis
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.capabilities.bank import Account
from weboob.deprecated.browser import Page
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class AccountsList(Page):
def on_loaded(self):
pass
def get_list(self):
blocks = self.document.xpath('//div[@id="synthese-list"]//div[@class="block"]')
for div in blocks:
block_title = ''.join(div.xpath('.//span[@class="title"]//text()')).lower()
for tr in div.getiterator('tr'):
account = Account()
account.id = None
account._link_id = None
if 'assurance vie' in block_title:
# Life insurance accounts are investments
account.type = Account.TYPE_MARKET
for td in tr.getiterator('td'):
if td.get('class', '') == 'account-cb':
try:
a = td.xpath('./*/a[@class="gras"]')[0]
except IndexError:
# ignore account
break
account.type = Account.TYPE_CARD
account.label = self.parser.tocleanstring(a)
try:
account._link_id = td.xpath('.//a')[0].get('href')
except KeyError:
pass
elif td.get('class', '') == 'account-name':
try:
span = td.xpath('./span[@class="label"]')[0]
except IndexError:
# ignore account
break
account.label = self.parser.tocleanstring(span)
try:
account._link_id = td.xpath('.//a')[0].get('href')
account._detail_url = account._link_id
except KeyError:
pass
elif td.get('class', '') == 'account-more-actions':
for a in td.getiterator('a'):
                            # For a normal account there are two "account-more-actions" links:
                            # one for the account, one for the credit card. Take the right one.
if "mouvements.phtml" in a.get('href') and "/cartes/" not in a.get('href'):
account._link_id = a.get('href')
elif td.get('class', '') == 'account-number':
id = td.text
id = id.strip(u' \n\t')
account.id = id
elif td.get('class', '') == 'account-total':
span = td.find('span')
if span is None:
balance = td.text
else:
balance = span.text
account.currency = account.get_currency(balance)
balance = FrenchTransaction.clean_amount(balance)
if balance != "":
account.balance = Decimal(balance)
else:
account.balance = Decimal(0)
else:
# because of some weird useless <tr>
if account.id is not None:
yield account
| agpl-3.0 | 8,497,556,090,955,697,000 | 42.161616 | 103 | 0.478352 | false |
escattone/kuma | kuma/core/utils.py | 1 | 8709 | import logging
import os
from smtplib import SMTPConnectError, SMTPServerDisconnected
from urllib.parse import parse_qsl, ParseResult, urlparse, urlsplit, urlunsplit
import requests
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, get_connection
from django.http import QueryDict
from django.utils.cache import patch_cache_control
from django.utils.encoding import smart_bytes
from django.utils.http import urlencode
from polib import pofile
from pyquery import PyQuery as pq
from redo import retrying
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
log = logging.getLogger("kuma.core.utils")
def strings_are_translated(strings, locale):
# http://stackoverflow.com/a/24339946/571420
pofile_path = os.path.join(
settings.ROOT, "locale", locale, "LC_MESSAGES", "django.po"
)
try:
po = pofile(pofile_path)
except IOError: # in case the file doesn't exist or couldn't be parsed
return False
all_strings_translated = True
for string in strings:
if not any(
e
for e in po
if e.msgid == string
and (e.translated() and "fuzzy" not in e.flags)
and not e.obsolete
):
all_strings_translated = False
return all_strings_translated
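# Minimal usage sketch (the msgids below are made-up examples; real callers pass UI
# strings): returns True only when every string exists in the locale's django.po,
# is translated, is not fuzzy, and is not obsolete; a missing or unparseable .po
# file yields False.
#
#   strings_are_translated(["Sign in", "Report a problem"], "fr")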
def urlparams(url_, fragment=None, query_dict=None, **query):
"""
Add a fragment and/or query parameters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url_ = urlparse(url_)
fragment = fragment if fragment is not None else url_.fragment
q = url_.query
new_query_dict = (
QueryDict(smart_bytes(q), mutable=True) if q else QueryDict("", mutable=True)
)
if query_dict:
for k, l in query_dict.lists():
new_query_dict[k] = None # Replace, don't append.
for v in l:
new_query_dict.appendlist(k, v)
for k, v in query.items():
# Replace, don't append.
if isinstance(v, list):
new_query_dict.setlist(k, v)
else:
new_query_dict[k] = v
query_string = urlencode(
[(k, v) for k, l in new_query_dict.lists() for v in l if v is not None]
)
new = ParseResult(
url_.scheme, url_.netloc, url_.path, url_.params, query_string, fragment
)
return new.geturl()
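# Illustrative sketch (assumes Django settings are configured, as they are whenever
# kuma imports this module). Existing parameters are kept, and a keyword argument
# replaces any parameter of the same name rather than appending to it:
#
#   urlparams("/search?q=css&page=1", page=2, fragment="results")
#   # -> "/search?q=css&page=2#results"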
def add_shared_cache_control(response, **kwargs):
"""
Adds a Cache-Control header for shared caches, like CDNs, to the
provided response.
Default settings (which can be overridden or extended):
- max-age=0 - Don't use browser cache without asking if still valid
- s-maxage=CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE - Cache in the shared
      cache for the default period of time
- public - Allow intermediate proxies to cache response
"""
nocache = response.has_header("Cache-Control") and (
"no-cache" in response["Cache-Control"]
or "no-store" in response["Cache-Control"]
)
if nocache:
return
# Set the default values.
cc_kwargs = {
"public": True,
"max_age": 0,
"s_maxage": settings.CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE,
}
# Override the default values and/or add new ones.
cc_kwargs.update(kwargs)
patch_cache_control(response, **cc_kwargs)
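# Behaviour sketch (the header value depends on settings, so the number below is an
# assumption): with CACHE_CONTROL_DEFAULT_SHARED_MAX_AGE = 86400 and no prior
# Cache-Control header,
#
#   add_shared_cache_control(response)
#
# leaves response["Cache-Control"] as something like "public, max-age=0, s-maxage=86400",
# while a response already marked "no-cache" or "no-store" is returned untouched.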
def order_params(original_url):
"""Standardize order of query parameters."""
bits = urlsplit(original_url)
qs = sorted(parse_qsl(bits.query, keep_blank_values=True))
new_qs = urlencode(qs)
new_url = urlunsplit((bits.scheme, bits.netloc, bits.path, new_qs, bits.fragment))
return new_url
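# For example:
#
#   order_params("https://example.com/path?b=2&a=1")
#   # -> "https://example.com/path?a=1&b=2"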
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
):
"""Opinionated wrapper that creates a requests session with a
HTTPAdapter that sets up a Retry policy that includes connection
retries.
    If you do the more naive retry by simply setting a number, e.g.::
adapter = HTTPAdapter(max_retries=3)
then it will raise immediately on any connection errors.
Retrying on connection errors guards better on unpredictable networks.
From http://docs.python-requests.org/en/master/api/?highlight=retries#requests.adapters.HTTPAdapter
it says: "By default, Requests does not retry failed connections."
The backoff_factor is documented here:
https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry
A default of retries=3 and backoff_factor=0.3 means it will sleep like::
[0.3, 0.6, 1.2]
""" # noqa
session = requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
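# Typical usage sketch (the URL is a placeholder): a GET that transparently retries
# connection errors and 500/502/504 responses with the backoff described above.
#
#   session = requests_retry_session(retries=5, backoff_factor=0.2)
#   response = session.get("https://example.com/api/health", timeout=10)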
def safer_pyquery(*args, **kwargs):
"""
PyQuery is magically clumsy in how it handles its arguments. A more
ideal and explicit constructor would be:
>>> from pyquery import PyQuery as pq
>>> parsed = pq(html=my_html_string)
>>> parsed = pq(url=definitely_a_url_string)
But instead, you're expected to use it like this:
>>> from pyquery import PyQuery as pq
>>> parsed = pq(my_html_string)
>>> parsed = pq(definitely_a_url_string)
...and PyQuery attempts to be smart and look at that first argument
and if it looks like a URL, it first calls `requests.get()` on it.
This function is a thin wrapper on that constructor that prevents
    that dangerous code from ever running.
    NOTE! As of May 10 2019, this risk exists in the latest release of
    PyQuery. Hopefully it will be fixed but it would be a massively disruptive
change and thus unlikely to happen any time soon.
NOTE 2! Unlikely to be fixed by core pyquery team any time soon
https://github.com/gawel/pyquery/issues/203
"""
# This "if" statement is exactly what PyQuery's constructor does.
# We'll run it ourselves once and if it matches, "ruin" it by
# injecting that extra space.
if (
len(args) >= 1
and isinstance(args[0], str)
and args[0].split("://", 1)[0] in ("http", "https")
):
args = (f" {args[0]}",) + args[1:]
return pq(*args, **kwargs)
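# Behaviour sketch: markup is parsed as usual, while a URL-looking string is treated
# as text instead of being fetched over the network.
#
#   doc = safer_pyquery("<html><body><h1>Title</h1></body></html>")
#   doc("h1").text()                       # -> "Title"
#   safer_pyquery("https://example.com")   # parsed as text; no requests.get() happens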
def send_mail_retrying(
subject,
message,
from_email,
recipient_list,
fail_silently=False,
auth_user=None,
auth_password=None,
connection=None,
html_message=None,
attachment=None,
**kwargs,
):
"""Copied verbatim from django.core.mail.send_mail but with the override
that we're using our EmailMultiAlternativesRetrying class instead.
See its doc string for its full documentation.
The only difference is that this function allows for setting your
own custom 'retrying' keyword argument.
"""
connection = connection or get_connection(
username=auth_user,
password=auth_password,
fail_silently=fail_silently,
)
mail = EmailMultiAlternativesRetrying(
subject, message, from_email, recipient_list, connection=connection
)
if html_message:
mail.attach_alternative(html_message, "text/html")
if attachment:
mail.attach(attachment["name"], attachment["bytes"], attachment["mime"])
return mail.send(**kwargs)
class EmailMultiAlternativesRetrying(EmailMultiAlternatives):
"""
Thin wrapper on django.core.mail.EmailMultiAlternatives that adds
a retrying functionality. By default, the only override is that
    we're very explicit about the set of exceptions we treat as retry'able.
    The list of exceptions we use to trigger a retry is:
* smtplib.SMTPConnectError
* smtplib.SMTPServerDisconnected
Only list exceptions that have been known to happen and are safe.
"""
def send(self, *args, retry_options=None, **kwargs):
# See https://github.com/mozilla-releng/redo
# for a list of the default options to the redo.retry function
# which the redo.retrying context manager wraps.
retry_options = retry_options or {
"retry_exceptions": (SMTPConnectError, SMTPServerDisconnected),
# The default in redo is 60 seconds. Let's tone that down.
"sleeptime": 3,
"attempts": 10,
}
parent_method = super(EmailMultiAlternativesRetrying, self).send
with retrying(parent_method, **retry_options) as method:
return method(*args, **kwargs)
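# Usage sketch (addresses are placeholders): send_mail_retrying() takes the same
# arguments as django.core.mail.send_mail plus an optional retry_options dict that
# is forwarded to EmailMultiAlternativesRetrying.send().
#
#   send_mail_retrying(
#       "Weekly digest",
#       "Plain-text body",
#       "noreply@example.com",
#       ["user@example.com"],
#       retry_options={"sleeptime": 1, "attempts": 3,
#                      "retry_exceptions": (SMTPServerDisconnected,)},
#   )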
| mpl-2.0 | -7,837,640,281,456,378,000 | 32.114068 | 103 | 0.663452 | false |
obulpathi/poppy | tests/endtoend/test_ssl_enabled.py | 1 | 2565 | # coding= utf-8
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from tests.endtoend import base
class TestSSLCDN(base.TestBase):
"""Tests for CDN enabling with SSL on."""
def setUp(self):
super(TestSSLCDN, self).setUp()
if self.test_config.run_ssl_tests is False:
self.skipTest('SSL tests are currently disabled in configuration')
self.test_domain = base.random_string(prefix='TestCDN-SSL')
self.origin = self.test_config.ssl_origin
def test_shared_ssl_enable_cdn(self):
# Create a Poppy Service for the test website
domain_list = [{"domain": self.test_domain, "protocol": "https",
"certificate": "shared"}]
origin_list = [{"origin": self.origin, "port": 443, "ssl": True}]
caching_list = []
self.service_name = base.random_string(prefix='testService-')
resp = self.setup_service(
service_name=self.service_name,
domain_list=domain_list,
origin_list=origin_list,
caching_list=caching_list,
flavor_id=self.poppy_config.flavor)
self.service_location = resp.headers['location']
resp = self.poppy_client.get_service(location=self.service_location)
links = resp.json()['links']
origin_url = 'http://' + self.origin
access_url = [link['href'] for link in links if
link['rel'] == 'access_url']
cdn_url = 'https://' + access_url[0]
time.sleep(self.dns_config.cdn_provider_dns_sleep)
self.assertSameContent(origin_url=origin_url,
cdn_url=cdn_url)
# Benchmark page load metrics for the CDN enabled website
if self.test_config.webpagetest_enabled:
wpt_test_results = self.run_webpagetest(url=cdn_url)
print(wpt_test_results)
def tearDown(self):
self.poppy_client.delete_service(location=self.service_location)
super(TestSSLCDN, self).tearDown()
| apache-2.0 | 8,846,897,874,403,534,000 | 34.136986 | 78 | 0.640936 | false |
Alignak-monitoring-contrib/alignak-module-nsca | test/test_module.py | 1 | 19503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test the module
"""
import re
import os
import time
import pytest
import subprocess
from .alignak_test import AlignakTest
from alignak.modulesmanager import ModulesManager
from alignak.objects.module import Module
from alignak.basemodule import BaseModule
from alignak.brok import Brok
# Set environment variable to ask for code coverage collection
os.environ['COVERAGE_PROCESS_START'] = '.coveragerc'
import alignak_module_nsca
class TestModules(AlignakTest):
"""
This class contains the tests for the module
"""
def test_module_loading(self):
"""
Test module loading
Alignak module loading
:return:
"""
self.setup_with_file('./cfg/alignak.cfg')
self.assertTrue(self.conf_is_correct)
self.show_configuration_logs()
# No arbiter modules created
modules = [m.module_alias for m in self._arbiter.link_to_myself.modules]
self.assertListEqual(modules, [])
# No broker modules
modules = [m.module_alias for m in self._broker_daemon.modules]
self.assertListEqual(modules, [])
        # Only the default inner-retention module on the scheduler
modules = [m.module_alias for m in self._scheduler_daemon.modules]
self.assertListEqual(modules, ['inner-retention'])
# A receiver module
modules = [m.module_alias for m in self._receiver.modules]
self.assertListEqual(modules, ['nsca'])
def test_module_manager(self):
"""
        Test if the module manager correctly manages all the modules
# CLI test: echo "host;;service;;0;;OK - test" | /usr/sbin/send_nsca -H localhost -c /etc/send_nsca.cfg -d ";;"
:return:
"""
self.setup_with_file('./cfg/alignak.cfg')
self.assertTrue(self.conf_is_correct)
self.clear_logs()
# Create an Alignak module
mod = Module({
'module_alias': 'nsca',
'module_types': 'nsca',
'python_name': 'alignak_module_nsca'
})
# Create the modules manager for a daemon type
self.modulemanager = ModulesManager(self._broker_daemon)
# Load an initialize the modules:
# - load python module
# - get module properties and instances
self.modulemanager.load_and_init([mod])
# Loading module nsca
print("Load and init")
self.show_logs()
i=0
self.assert_log_match(re.escape(
"Importing Python module 'alignak_module_nsca' for nsca..."
), i)
i += 1
# Dict order is problematic :/
# self.assert_log_match(re.escape(
# "Module properties: {'daemons': ['broker'], 'phases': ['running'], "
# "'type': 'nsca', 'external': True}"
# ), i)
i += 1
self.assert_log_match(re.escape(
"Imported 'alignak_module_nsca' for nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"Loaded Python module 'alignak_module_nsca' (nsca)"
), i)
i += 1
self.assert_log_match(re.escape(
"Alignak starting module 'nsca'"
), i)
i += 1
self.assert_log_match(re.escape(
"Give an instance of alignak_module_nsca for alias: nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"configuration, allowed hosts : '127.0.0.1'(5667), buffer length: 4096, "
"payload length: -1, encryption: 0, max packet age: 30, "
"check future packet: True, backlog: 10"
), i)
time.sleep(1)
# Reload the module
print("Reload")
self.modulemanager.load([mod])
self.modulemanager.get_instances()
#
# Loading module nsca
self.show_logs()
i = 0
self.assert_log_match(re.escape(
"Importing Python module 'alignak_module_nsca' for nsca..."
), i)
i += 1
# self.assert_log_match(re.escape(
# "Module properties: {'daemons': ['broker'], 'phases': ['running'], "
# "'type': 'nsca', 'external': True}"
# ), i)
i += 1
self.assert_log_match(re.escape(
"Imported 'alignak_module_nsca' for nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"Loaded Python module 'alignak_module_nsca' (nsca)"
), i)
i += 1
self.assert_log_match(re.escape(
"Alignak starting module 'nsca'"
), i)
i += 1
self.assert_log_match(re.escape(
"Give an instance of alignak_module_nsca for alias: nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"configuration, allowed hosts : '127.0.0.1'(5667), buffer length: 4096, "
"payload length: -1, encryption: 0, max packet age: 30, "
"check future packet: True, backlog: 10"
), i)
i += 1
self.assert_log_match(re.escape(
"Importing Python module 'alignak_module_nsca' for nsca..."
), i)
i += 1
# self.assert_log_match(re.escape(
# "Module properties: {'daemons': ['broker'], 'phases': ['running'], "
# "'type': 'nsca', 'external': True}"
# ), i)
i += 1
self.assert_log_match(re.escape(
"Imported 'alignak_module_nsca' for nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"Loaded Python module 'alignak_module_nsca' (nsca)"
), i)
i += 1
self.assert_log_match(re.escape(
"Request external process to stop for nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"External process stopped."
), i)
i += 1
self.assert_log_match(re.escape(
"Alignak starting module 'nsca'"
), i)
i += 1
# self.assert_log_match(re.escape(
# "Give an instance of alignak_module_nsca for alias: nsca"
# ), i)
# i += 1
self.assert_log_match(re.escape(
"Give an instance of alignak_module_nsca for alias: nsca"
), i)
i += 1
self.assert_log_match(re.escape(
"configuration, allowed hosts : '127.0.0.1'(5667), buffer length: 4096, "
"payload length: -1, encryption: 0, max packet age: 30, "
"check future packet: True, backlog: 10"
), i)
my_module = self.modulemanager.instances[0]
# Get list of not external modules
self.assertListEqual([], self.modulemanager.get_internal_instances())
for phase in ['configuration', 'late_configuration', 'running', 'retention']:
self.assertListEqual([], self.modulemanager.get_internal_instances(phase))
# Get list of external modules
self.assertListEqual([my_module], self.modulemanager.get_external_instances())
for phase in ['configuration', 'late_configuration', 'retention']:
self.assertListEqual([], self.modulemanager.get_external_instances(phase))
for phase in ['running']:
self.assertListEqual([my_module], self.modulemanager.get_external_instances(phase))
        # Clear logs
self.clear_logs()
# Start external modules
self.modulemanager.start_external_instances()
# Starting external module nsca
self.assert_log_match("Trying to initialize module: nsca", 0)
self.assert_log_match("Module nsca is initialized.", 1)
self.assert_log_match("Starting external module nsca", 2)
self.assert_log_match("Starting external process for module nsca", 3)
self.assert_log_match("nsca is now started", 4)
# Check alive
self.assertIsNotNone(my_module.process)
self.assertTrue(my_module.process.is_alive())
        # Clear logs
self.clear_logs()
# Kill the external module (normal stop is .stop_process)
my_module.kill()
time.sleep(0.1)
index = 0
self.assert_log_match("Killing external module", index)
index = index + 1
# todo: This log is not expected! But it is probably because of the py.test ...
# Indeed the receiver daemon that the module is attached to is receiving a SIGTERM !!!
self.assert_log_match(re.escape("nsca is still living 10 seconds after a normal kill, I help it to die"), index)
index = index + 1
self.assert_log_match("External module killed", index)
index = index + 1
# Should be dead (not normally stopped...) but we still know a process for this module!
self.assertIsNotNone(my_module.process)
# Nothing special ...
self.modulemanager.check_alive_instances()
self.assert_log_match("The external module nsca died unexpectedly!", index)
index = index + 1
self.assert_log_match("Setting the module nsca to restart", index)
index = index + 1
# # Try to restart the dead modules
# self.modulemanager.try_to_restart_deads()
# self.assert_log_match("Trying to restart module: nsca", index)
# index = index +1
# self.assert_log_match("Too early to retry initialization, retry period is 5 seconds", index)
# index = index +1
#
# # In fact it's too early, so it won't do it
# # The module instance is still dead
# self.assertFalse(my_module.process.is_alive())
# So we lie, on the restart tries ...
my_module.last_init_try = -5
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
self.assert_log_match("Trying to restart module: nsca", index)
index = index +1
self.assert_log_match("Trying to initialize module: nsca", index)
index = index +1
self.assert_log_match("Module nsca is initialized.", index)
index = index +1
self.assert_log_match("Restarting nsca...", index)
index = index +1
# The module instance is now alive again
self.assertTrue(my_module.process.is_alive())
self.assert_log_match("Starting external process for module nsca", index)
index = index + 1
self.assert_log_match("nsca is now started", index)
index = index + 1
# There is nothing else to restart in the module manager
self.assertEqual([], self.modulemanager.to_restart)
        # Clear logs
self.clear_logs()
# Let the module start and then kill it again
time.sleep(3.0)
my_module.kill()
# time.sleep(5.0)
self.show_logs()
print("My module PID 2: %s" % my_module.process.pid)
time.sleep(0.2)
self.assertFalse(my_module.process.is_alive())
index = 0
self.assert_log_match("Killing external module", index)
index = index +1
# # todo: This log is not expected! But it is probably because of the py.test ...
# # Indeed the receiver daemon that the module is attached to is receiving a SIGTERM !!!
# self.assert_log_match(re.escape("'web-services' is still living 10 seconds after a normal kill, I help it to die"), index)
# index = index +1
self.assert_log_match("External module killed", index)
index = index +1
        # The module is dead but the modules manager does not know it yet!
self.modulemanager.check_alive_instances()
self.assert_log_match("The external module nsca died unexpectedly!", index)
index = index +1
self.assert_log_match("Setting the module nsca to restart", index)
index = index +1
self.modulemanager.try_to_restart_deads()
self.assert_log_match("Trying to restart module: nsca", index)
index = index +1
self.assert_log_match("Too early to retry initialization, retry period is 5 seconds", index)
index = index +1
# In fact it's too early, so it won't do it
# The module instance is still dead
self.assertFalse(my_module.process.is_alive())
# So we lie, on the restart tries ...
my_module.last_init_try = -5
self.modulemanager.check_alive_instances()
self.modulemanager.try_to_restart_deads()
self.assert_log_match("Trying to restart module: nsca", index)
index = index +1
self.assert_log_match("Trying to initialize module: nsca", index)
index = index +1
self.assert_log_match("Module nsca is initialized.", index)
index = index +1
self.assert_log_match("Restarting nsca...", index)
index = index +1
# The module instance is now alive again
self.assertTrue(my_module.process.is_alive())
self.assert_log_match("Starting external process for module nsca", index)
index = index +1
self.assert_log_match("nsca is now started", index)
index = index +1
time.sleep(1.0)
print("My module PID: %s" % my_module.process.pid)
        # Clear logs
self.clear_logs()
# And we clear all now
self.modulemanager.stop_all()
# Stopping module nsca
index = 0
self.assert_log_match("Shutting down modules...", index)
index = index +1
self.assert_log_match("Request external process to stop for nsca", index)
index = index +1
self.assert_log_match(re.escape("I'm stopping module 'nsca' (pid="), index)
index = index +1
# self.assert_log_match(re.escape("'nsca' is still living after a normal kill, I help it to die"), index)
# index = index +1
self.assert_log_match(re.escape("Killing external module (pid"), index)
index = index +1
self.assert_log_match(re.escape("External module killed"), index)
index = index +1
self.assert_log_match("External process stopped.", index)
index = index +1
def test_module_start_default(self):
"""Test the module initialization function, no parameters, using default
:return:
"""
# Obliged to call to get a self.logger...
self.setup_with_file('./cfg/alignak.cfg')
self.assertTrue(self.conf_is_correct)
        # Clear logs
self.clear_logs()
# -----
# Default initialization
# -----
# Create an Alignak module
mod = Module({
'module_alias': 'nsca',
'module_types': 'passive',
'python_name': 'alignak_module_nsca'
})
instance = alignak_module_nsca.get_instance(mod)
self.assertIsInstance(instance, BaseModule)
self.show_logs()
# self.assert_log_match(
# re.escape("Give an instance of alignak_module_nsca for alias: nsca"), 0)
self.assert_log_match(re.escape(
"Give an instance of alignak_module_nsca for alias: nsca"
), 0)
self.assert_log_match(re.escape(
"configuration, allowed hosts : '127.0.0.1'(5667), buffer length: 4096, "
"payload length: -1, encryption: 0, max packet age: 30, "
"check future packet: True, backlog: 10"
), 1)
@pytest.mark.skip("Runs perfectly locally! But fails during the Travis-CI build :(")
def test_module_send_nsca(self):
"""
        Test if the module manager correctly manages all the modules
# CLI test: echo "host;;service;;0;;OK - test" | /usr/sbin/send_nsca -H localhost -c /etc/send_nsca.cfg -d ";;"
:return:
"""
self.setup_with_file('./cfg/alignak.cfg')
self.assertTrue(self.conf_is_correct)
if os.path.exists("/tmp/test-nsca.log"):
os.remove("/tmp/test-nsca.log")
host = "127.0.0.1"
port = 25667
# Create an Alignak module
mod = Module({
'module_alias': 'nsca',
'module_types': 'nsca',
'python_name': 'alignak_module_nsca',
'log_level': 'DEBUG',
'host': host,
'port': port,
'test_logger': '/tmp/test-nsca.log'
})
# Create the modules manager for a daemon type
self.modulemanager = ModulesManager(self._broker_daemon)
# Load an initialize the modules:
# - load python module
# - get module properties and instances
self.modulemanager.load_and_init([mod])
# time.sleep(1)
# # Reload the module
# print("Reload")
# self.modulemanager.load([mod])
# self.modulemanager.get_instances()
my_module = self.modulemanager.instances[0]
# Start external modules
self.modulemanager.start_external_instances()
# # Starting external module nsca
# self.assert_log_match("Trying to initialize module: nsca", 7)
# self.assert_log_match("Module nsca is initialized.", 8)
# self.assert_log_match("Starting external module nsca", 9)
# self.assert_log_match("Starting external process for module nsca", 10)
# self.assert_log_match("nsca is now started", 11)
# Check alive
self.assertIsNotNone(my_module.process)
self.assertTrue(my_module.process.is_alive())
# Show and clear logs
self.show_logs()
self.clear_logs()
# Send nsca from an external script
send_nsca_process = os.system(
'echo "ABCDE;;FGHIJ;;0;;OK - test" | /usr/sbin/send_nsca -H %s -p %s -c ./send_nsca.cfg -d ";;"'
% (host, port))
print("Result = %s" % send_nsca_process)
time.sleep(3)
send_nsca_process = os.system(
'echo "host;;service;;1;;WARNING - test" | /usr/sbin/send_nsca -H %s -p %s -c ./send_nsca.cfg -d ";;"'
% (host, port))
print("Result = %s" % send_nsca_process)
time.sleep(3)
send_nsca_process = os.system(
'echo "host;;service;;2;;CRITICAL - test" | /usr/sbin/send_nsca -H %s -p %s -c ./send_nsca.cfg -d ";;"'
% (host, port))
print("Result = %s" % send_nsca_process)
time.sleep(3)
assert os.path.exists("/tmp/test-nsca.log")
cpt = 0
with open("/tmp/test-nsca.log") as f:
for line in f:
print(".%s" % line[:-1])
if "Output" in line:
if "Output: ;OK - test" in line:
cpt += 1
if "Output: ;WARNING - test" in line:
cpt += 1
if "Output: ;CRITICAL - test" in line:
cpt += 1
assert cpt == 3
# Stopping the module nsca
self.modulemanager.stop_all()
| agpl-3.0 | -3,633,992,653,995,104,000 | 35.798113 | 132 | 0.584372 | false |
cycladesnz/chambersAndCreatures | src/effects/av_effects.py | 1 | 3684 | from pdcglobal import *
from .effect import Effect
from .dv_effects import DazzleEffect
class StunEffect(Effect):
def __init__(self, host, owner):
dur = d(3)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Stuns the enemy'
def tick(self):
self.host.timer += self.host.speed * d(3)
if self.host == self.host.game.player:
self.host.game.shout('You are stunned')
else:
self.host.game.shout('%s is stunned' % (self.host.name))
Effect.tick(self)
class BleedEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Makes the enemy bleed'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_GENERIC, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are bleeding')
else:
self.host.game.shout('%s bleeds' % (self.host.name))
Effect.tick(self)
class BugPoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(25)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
if d(100) < 5:
self.host.timer += self.host.speed * d(5)
if self.host == self.host.game.player:
                self.host.game.shout('You suddenly fall asleep')
            else:
                self.host.game.shout('%s suddenly falls asleep' % (self.host.name))
Effect.tick(self)
class YumuraPoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
notice = False
if d(100) < 10:
StunEffect(self.host, self.owner)
notice = True
if d(100) < 10:
DazzleEffect(self.host, self.owner)
notice = True
if d(100) < 10:
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
notice = True
if d(100) < 2:
self.host.game.do_damage(self.host, d(25), D_POISON, self.owner)
notice = True
if notice:
if self.host == self.host.game.player:
self.host.game.shout('You are poisoned')
else:
self.host.game.shout('%s is poisoned' % (self.host.name))
Effect.tick(self)
class KillerbeePoisonEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Poisons the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_POISON, self.owner)
if d(100) < 35:
StunEffect(self.host, self.owner)
if d(100) < 35:
DazzleEffect(self.host, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are poisoned')
else:
self.host.game.shout('%s is poisoned' % (self.host.name))
Effect.tick(self)
class StrokingEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Strokes the enemy'
def tick(self):
if self.host == self.host.game.player:
self.host.game.shout('You are getting stroked by %s' % (self.owner.name))
else:
self.host.game.shout('%s is getting stroked' % (self.host.name))
Effect.tick(self)
| gpl-2.0 | 3,554,105,722,189,327,400 | 33.754717 | 85 | 0.551031 | false |
jusjusjus/Motiftoolbox | Leech_3-cell/network.py | 1 | 1647 | #!/usr/bin/env python
import time
import sys
sys.path.insert(0, '../Tools')
import network3N as netw
import tools as tl
import numpy as np
class network(netw.network):
title = "Neural Network Motif"
def __init__(self, g_inh=0.01, info=None, position=None):
netw.network.__init__(self, g_inh, info, position)
self.ax.text(0.2, 0.1, 'inhibitory coupling strength in nS', fontsize=14)
def on_button(self, event):
self.event_start = np.array([event.xdata, event.ydata])
if event.button == 1:
dist = np.zeros((netw.N_COUPLING+3), float)
for n in xrange(netw.N_COUPLING+3):
dist[n] = max(abs(np.array(self.ax.texts[n].get_position())-self.event_start))
self.i_text = np.argmin(dist)
self.ax.texts[self.i_text].set_color('r')
self.fig.canvas.draw()
if not self.i_text < netw.N_COUPLING:
self.traces.state[3*(self.i_text-netw.N_COUPLING)] += 0.001 # voltage pulse
self.traces.pulsed = (self.i_text-netw.N_COUPLING)+1
def off_button(self, event):
delta = (event.ydata-self.event_start[1])/50.
if event.button == 1:
if self.i_text < netw.N_COUPLING:
i_coupling = text2coupling[self.i_text]
new_coupling = self.coupling_strength[i_coupling]+delta
self.coupling_strength[i_coupling] = (new_coupling>0.)*new_coupling
self.refresh_coupling(self.i_text)
self.ax.texts[self.i_text].set_color('k')
else:
new_coupling = self.coupling_strength+delta
self.coupling_strength = (new_coupling>0.)*new_coupling
self.show_coupling()
self.fig.canvas.draw()
if __name__ == "__main__":
import pylab as pl
net = network()
pl.show()
| gpl-2.0 | -6,949,641,564,638,452,000 | 19.085366 | 89 | 0.657559 | false |
tensorflow/federated | tensorflow_federated/python/simulation/datasets/emnist.py | 1 | 23967 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for the federated EMNIST dataset for simulation."""
import collections
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import download
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import sql_client_data
def _add_proto_parsing(dataset: tf.data.Dataset) -> tf.data.Dataset:
"""Add parsing of the tf.Example proto to the dataset pipeline."""
def parse_proto(tensor_proto):
parse_spec = {
'pixels': tf.io.FixedLenFeature(shape=(28, 28), dtype=tf.float32),
'label': tf.io.FixedLenFeature(shape=(), dtype=tf.int64)
}
parsed_features = tf.io.parse_example(tensor_proto, parse_spec)
return collections.OrderedDict(
label=tf.cast(parsed_features['label'], tf.int32),
pixels=parsed_features['pixels'])
return dataset.map(parse_proto, num_parallel_calls=tf.data.AUTOTUNE)
def load_data(only_digits=True, cache_dir=None):
"""Loads the Federated EMNIST dataset.
Downloads and caches the dataset locally. If previously downloaded, tries to
load the dataset from cache.
This dataset is derived from the Leaf repository
(https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
dataset, grouping examples by writer. Details about Leaf were published in
"LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
*Note*: This dataset does not include some additional preprocessing that
MNIST includes, such as size-normalization and centering.
In the Federated EMNIST data, the value of 1.0
corresponds to the background, and 0.0 corresponds to the color of the digits
themselves; this is the *inverse* of some MNIST representations,
e.g. in [tensorflow_datasets]
(https://github.com/tensorflow/datasets/blob/master/docs/datasets.md#mnist),
where 0 corresponds to the background color, and 255 represents the color of
the digit.
Data set sizes:
*only_digits=True*: 3,383 users, 10 label classes
- train: 341,873 examples
- test: 40,832 examples
*only_digits=False*: 3,400 users, 62 label classes
- train: 671,585 examples
- test: 77,483 examples
Rather than holding out specific users, each user's examples are split across
_train_ and _test_ so that all users have at least one example in _train_ and
one example in _test_. Writers that had less than 2 examples are excluded from
the data set.
The `tf.data.Datasets` returned by
`tff.simulation.datasets.ClientData.create_tf_dataset_for_client` will yield
`collections.OrderedDict` objects at each iteration, with the following keys
and values, in lexicographic order by key:
- `'label'`: a `tf.Tensor` with `dtype=tf.int32` and shape [1], the class
label of the corresponding pixels. Labels [0-9] correspond to the digits
classes, labels [10-35] correspond to the uppercase classes (e.g., label
11 is 'B'), and labels [36-61] correspond to the lowercase classes
(e.g., label 37 is 'b').
- `'pixels'`: a `tf.Tensor` with `dtype=tf.float32` and shape [28, 28],
containing the pixels of the handwritten digit, with values in
the range [0.0, 1.0].
Args:
only_digits: (Optional) whether to only include examples that are from the
digits [0-9] classes. If `False`, includes lower and upper case
characters, for a total of 62 class labels.
cache_dir: (Optional) directory to cache the downloaded file. If `None`,
caches in Keras' default cache directory.
Returns:
Tuple of (train, test) where the tuple elements are
`tff.simulation.datasets.ClientData` objects.
"""
database_path = download.get_compressed_file(
origin='https://storage.googleapis.com/tff-datasets-public/emnist_all.sqlite.lzma',
cache_dir=cache_dir)
if only_digits:
train_client_data = sql_client_data.SqlClientData(
database_path, 'digits_only_train').preprocess(_add_proto_parsing)
test_client_data = sql_client_data.SqlClientData(
database_path, 'digits_only_test').preprocess(_add_proto_parsing)
else:
train_client_data = sql_client_data.SqlClientData(
database_path, 'all_train').preprocess(_add_proto_parsing)
test_client_data = sql_client_data.SqlClientData(
database_path, 'all_test').preprocess(_add_proto_parsing)
return train_client_data, test_client_data
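# --- Editorial usage sketch; not part of the original TensorFlow Federated file ---
# A minimal illustration of the load_data() API documented above. The choice of
# only_digits=True and of the first client id are assumptions made for the example.
#
#   emnist_train, emnist_test = load_data(only_digits=True)
#   client_id = emnist_train.client_ids[0]
#   client_ds = emnist_train.create_tf_dataset_for_client(client_id)
#   for example in client_ds.take(1):
#     print(example['label'].numpy(), example['pixels'].shape)  # e.g. 3 and (28, 28)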
def get_synthetic():
"""Returns a small synthetic dataset for testing.
The single client produced has exactly 10 examples, one example for each digit
label. The images are derived from a fixed set of hard-coded images.
Returns:
A `tff.simulation.datasets.ClientData` object that matches the
characteristics (other than size) of those provided by
`tff.simulation.datasets.emnist.load_data`.
"""
return from_tensor_slices_client_data.TestClientData(
{'synthetic': _get_synthetic_digits_data()})
def _get_synthetic_digits_data():
"""Returns a dictionary suitable for `tf.data.Dataset.from_tensor_slices`.
Returns:
A dictionary that matches the structure of the data produced by
`tff.simulation.datasets.emnist.load_data`, with keys (in lexicographic
order) `label` and `pixels`.
"""
data = _SYNTHETIC_DIGITS_DATA
img_list = []
for img_array in data:
img_array = tf.constant(img_array, dtype=tf.float32) / 9.0
img_list.append(img_array)
pixels = tf.stack(img_list, axis=0)
labels = tf.constant(range(10), dtype=tf.int32)
return collections.OrderedDict(label=labels, pixels=pixels)
# pyformat: disable
# pylint: disable=bad-continuation,bad-whitespace
_SYNTHETIC_DIGITS_DATA = [
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,2,4,7,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,2,0,0,0,2,4,7,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,7,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,4,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,2,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
[[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]]
]
| apache-2.0 | 1,330,929,605,957,779,200 | 52.737668 | 90 | 0.518213 | false |
pcolmant/repanier | repanier_v2/models/notification.py | 1 | 2466 | from django.db import models
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from djangocms_text_ckeditor.fields import HTMLField
from repanier_v2.const import *
from repanier_v2.tools import cap
class Notification(models.Model):
message = HTMLField(
_("Notification"),
help_text=EMPTY_STRING,
configuration="CKEDITOR_SETTINGS_MODEL2",
default=EMPTY_STRING,
blank=True,
)
def get_notification_display(self):
return self.message
def get_html_notification_card_display(self):
if settings.REPANIER_SETTINGS_TEMPLATE != "bs3":
            notification = self.get_notification_display()
            if not notification:
                return EMPTY_STRING
            html = []
            html.append(
                format_html(
                    '<div class="card-body">{}</div>',
                    self.get_notification_display(),
                )
            )
return mark_safe(
"""
<div class="container-repanier_v2">
<div class="container">
<div class="row">
<div class="col">
<div class="card card-flash">
{html}
<a href="#" class="card-close"><i class="far fa-times-circle"></i></a>
</div>
</div>
</div>
</div>
</div>
<script type="text/javascript">
// Close card
$(".card-close").click(function(){{
$(this).parent().fadeOut(300);
$("a").click(function(){{
// Stop displaying flash in next pages
$(this).attr('href',$(this).attr('href')+"?flash=0");
return true;
}});
}});
</script>
""".format(
html=EMPTY_STRING.join(html)
)
)
return EMPTY_STRING
def __str__(self):
return cap(escape(self.get_notification_display()), 50)
class Meta:
verbose_name = _("Notification")
verbose_name_plural = _("Notifications")
| gpl-3.0 | -5,168,390,665,867,534,000 | 34.228571 | 106 | 0.452555 | false |
kbase/genome_annotation_api | lib/GenomeAnnotationAPI/utils.py | 1 | 6580 | import logging
from collections import Counter
class Utils:
def __init__(self, workspace_client):
self.ws = workspace_client
@staticmethod
def validate_params(params, expected, opt_param=set()):
"""Validates that required parameters are present. Warns if unexpected parameters appear"""
expected = set(expected)
opt_param = set(opt_param)
pkeys = set(params)
if expected - pkeys:
raise ValueError(
f"Required keys {', '.join(expected - pkeys)} not in supplied parameters")
defined_param = expected | opt_param
for param in params:
if param not in defined_param:
logging.warning("Unexpected parameter {} supplied".format(param))
def _get_field_from_ws(self, params, key):
self.validate_params(params, {'ref', }, {'feature_id_list', })
if params.get('feature_id_list'):
feature_id_list = set(params['feature_id_list'])
else:
feature_id_list = False
feature_fields = ['features', 'mrnas', 'cdss', 'non_coding_features']
ws_fields = [x + "/[*]/id" for x in feature_fields]
ws_fields += ["{}/[*]/{}".format(x, key) for x in feature_fields]
genome = self.ws.get_objects2(
{'objects': [{'ref': params['ref'], 'included': ws_fields}]}
)['data'][0]['data']
return {feature['id']: feature[key]
for field in feature_fields
for feature in genome.get(field, [])
if key in feature and
(not feature_id_list or feature['id'] in feature_id_list)}
def get_assembly(self, params):
self.validate_params(params, {'ref', })
objreq = {'objects': [{'ref': params['ref'],
'included': ['assembly_ref', 'contigset_ref']}]}
ref = self.ws.get_objects2(objreq)['data'][0]['data']
if 'assembly_ref' in ref:
return ref['assembly_ref']
if 'contigset_ref' in ref:
return ref['contigset_ref']
def get_taxon(self, params):
self.validate_params(params, {'ref', })
objreq = {'objects': [{'ref': params['ref'], 'included': ['taxon_ref']}]}
return self.ws.get_objects2(objreq)['data'][0]['data'].get('taxon_ref')
def get_feature_functions(self, params):
functions = {}
self.validate_params(params, {'ref', }, {'feature_id_list', })
if params.get('feature_id_list'):
feature_id_list = set(params['feature_id_list'])
else:
feature_id_list = False
feature_fields = ['features', 'mrnas', 'cdss', 'non_coding_features']
ws_fields = [x + "/[*]/id" for x in feature_fields]
ws_fields += [x + "/[*]/function" for x in feature_fields]
ws_fields += [x + "/[*]/functions" for x in feature_fields]
genome = self.ws.get_objects2(
{'objects': [{'ref': params['ref'], 'included': ws_fields}]}
)['data'][0]['data']
for field in feature_fields:
for feature in genome.get(field, []):
func = feature.get('function', '')
func += ", ".join(feature.get('functions', []))
if not feature_id_list or feature['id'] in feature_id_list:
functions[feature['id']] = func
return functions
def get_feature_aliases(self, params):
aliases = {}
self.validate_params(params, {'ref', }, {'feature_id_list', })
if params.get('feature_id_list'):
feature_id_list = set(params['feature_id_list'])
else:
feature_id_list = False
feature_fields = ['features', 'mrnas', 'cdss', 'non_coding_features']
ws_fields = [x + "/[*]/id" for x in feature_fields]
ws_fields += [x + "/[*]/aliases" for x in feature_fields]
ws_fields += [x + "/[*]/db_xrefs" for x in feature_fields]
genome = self.ws.get_objects2(
{'objects': [{'ref': params['ref'], 'included': ws_fields}]}
)['data'][0]['data']
for field in feature_fields:
for feature in genome.get(field, []):
a = [": ".join(x) for x in feature.get('db_xrefs', [[]])]
if feature.get('aliases'):
if isinstance(feature['aliases'][0], list):
a += [": ".join(x) for x in feature['aliases']]
else:
a += feature['aliases']
if not feature_id_list or feature['id'] in feature_id_list:
aliases[feature['id']] = a
return aliases
def get_feature_type_descriptions(self, params):
self.validate_params(params, {'ref', }, {'feature_id_list', })
if params.get('feature_id_list'):
feature_id_list = set(params['feature_id_list'])
else:
feature_id_list = False
feature_fields = {'features': 'gene', 'mrnas': 'mRNA', 'cdss': 'CDS',
'non_coding_features': 'non_coding_feature'}
ws_fields = [x + "/[*]/id" for x in feature_fields]
ws_fields += [x + "/[*]/type" for x in feature_fields]
genome = self.ws.get_objects2(
{'objects': [{'ref': params['ref'], 'included': ws_fields}]}
)['data'][0]['data']
return {feature['id']: feature.get('type', default_type)
for field, default_type in feature_fields.items()
for feature in genome.get(field, [])
if 'type' in feature and
(not feature_id_list or feature['id'] in feature_id_list)}
def get_feature_type_counts(self, params):
genome = self.ws.get_objects2(
{'objects': [{'ref': params['ref'], 'included': ['feature_counts']}]}
)['data'][0]['data']
if 'feature_counts' in genome and not params.get('feature_id_list'):
return genome['feature_counts']
else:
return Counter(self.get_feature_type_descriptions(params).values())
def get_feature_types(self, params):
return sorted(set(self.get_feature_type_counts(params).keys()))
def get_feature_locations(self, params):
return self._get_field_from_ws(params, 'location')
def get_feature_dna_sequences(self, params):
return self._get_field_from_ws(params, 'dna_sequence')
def get_feature_proteins(self, params):
return self._get_field_from_ws(params, 'protein_translation')
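# Editorial usage sketch (not part of the original module). How the workspace
# client is constructed is outside this file; the client object and the object
# reference '1/2/3' below are assumptions for illustration only.
#
#   # ws = <workspace client exposing get_objects2>
#   # utils = Utils(ws)
#   # functions = utils.get_feature_functions({'ref': '1/2/3'})
#   # counts = utils.get_feature_type_counts({'ref': '1/2/3'})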
| mit | -594,657,575,675,096,000 | 44.694444 | 99 | 0.539666 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/virtual_machine.py | 1 | 7652 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachine(Resource):
"""Describes a Virtual Machine.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param plan: Specifies information about the marketplace image used to
create the virtual machine. This element is only used for marketplace
images. Before you can use a marketplace image from an API, you must
enable the image for programmatic use. In the Azure portal, find the
marketplace image that you want to use and then click **Want to deploy
programmatically, Get Started ->**. Enter any required information and
then click **Save**.
:type plan: ~azure.mgmt.compute.v2016_04_30_preview.models.Plan
:param hardware_profile: Specifies the hardware settings for the virtual
machine.
:type hardware_profile:
~azure.mgmt.compute.v2016_04_30_preview.models.HardwareProfile
:param storage_profile: Specifies the storage settings for the virtual
machine disks.
:type storage_profile:
~azure.mgmt.compute.v2016_04_30_preview.models.StorageProfile
:param os_profile: Specifies the operating system settings for the virtual
machine.
:type os_profile: ~azure.mgmt.compute.v2016_04_30_preview.models.OSProfile
:param network_profile: Specifies the network interfaces of the virtual
machine.
:type network_profile:
~azure.mgmt.compute.v2016_04_30_preview.models.NetworkProfile
:param diagnostics_profile: Specifies the boot diagnostic settings state.
<br><br>Minimum api-version: 2015-06-15.
:type diagnostics_profile:
~azure.mgmt.compute.v2016_04_30_preview.models.DiagnosticsProfile
:param availability_set: Specifies information about the availability set
that the virtual machine should be assigned to. Virtual machines specified
in the same availability set are allocated to different nodes to maximize
availability. For more information about availability sets, see [Manage
the availability of virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
<br><br> For more information on Azure planned maintainance, see [Planned
maintenance for virtual machines in
Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Currently, a VM can only be added to availability set at creation
time. An existing VM cannot be added to an availability set.
:type availability_set:
~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar instance_view: The virtual machine instance view.
:vartype instance_view:
~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineInstanceView
:param license_type: Specifies that the image or disk that is being used
was licensed on-premises. This element is only used for images that
contain the Windows Server operating system. <br><br> Possible values are:
<br><br> Windows_Client <br><br> Windows_Server <br><br> If this element
is included in a request for an update, the value must match the initial
value. This value cannot be updated. <br><br> For more information, see
[Azure Hybrid Use Benefit for Windows
Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
<br><br> Minimum api-version: 2015-06-15
:type license_type: str
:ivar vm_id: Specifies the VM unique ID which is a 128-bits identifier
that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read
using platform BIOS commands.
:vartype vm_id: str
:ivar resources: The virtual machine child extension resources.
:vartype resources:
list[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineExtension]
:param identity: The identity of the virtual machine, if configured.
:type identity:
~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'instance_view': {'readonly': True},
'vm_id': {'readonly': True},
'resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'},
'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'},
'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'},
'diagnostics_profile': {'key': 'properties.diagnosticsProfile', 'type': 'DiagnosticsProfile'},
'availability_set': {'key': 'properties.availabilitySet', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineInstanceView'},
'license_type': {'key': 'properties.licenseType', 'type': 'str'},
'vm_id': {'key': 'properties.vmId', 'type': 'str'},
'resources': {'key': 'resources', 'type': '[VirtualMachineExtension]'},
'identity': {'key': 'identity', 'type': 'VirtualMachineIdentity'},
}
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__(**kwargs)
self.plan = kwargs.get('plan', None)
self.hardware_profile = kwargs.get('hardware_profile', None)
self.storage_profile = kwargs.get('storage_profile', None)
self.os_profile = kwargs.get('os_profile', None)
self.network_profile = kwargs.get('network_profile', None)
self.diagnostics_profile = kwargs.get('diagnostics_profile', None)
self.availability_set = kwargs.get('availability_set', None)
self.provisioning_state = None
self.instance_view = None
self.license_type = kwargs.get('license_type', None)
self.vm_id = None
self.resources = None
self.identity = kwargs.get('identity', None)
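# Editorial sketch, not part of the generated SDK file: the model is built from
# the keyword arguments handled in __init__ above. The profile classes and the
# values shown are illustrative assumptions from the same models package.
#
#   # vm = VirtualMachine(
#   #     location='westus',
#   #     hardware_profile=HardwareProfile(vm_size='Standard_DS1_v2'),
#   #     os_profile=OSProfile(computer_name='example-vm', admin_username='azureuser'),
#   # )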
| mit | -6,652,145,812,120,035,000 | 51.054422 | 170 | 0.674726 | false |
sjkingo/fantail | fantail/cli.py | 1 | 2628 | """
Command-line interface to the fantail site generator.
setuptools will create a console script pointing to cli:main
"""
import argparse
import logging
from fantail.staticsite import StaticSite
def cmd_init_site(args):
"""
Creates a new site with default templates.
"""
site = StaticSite(args.site_directory)
site.init_site()
def cmd_clean_site(args):
"""
Removes the output directory from an existing site.
"""
site = StaticSite(args.site_directory)
site.clean_site()
def cmd_build_site(args):
"""
Builds a site by running the generator over the pages directory.
"""
site = StaticSite(args.site_directory)
site.build_site()
def parse_args(override_args=None):
parser = argparse.ArgumentParser(description='fantail is a static site generator')
subparsers = parser.add_subparsers(dest='cmd', help='Subcommands (type subcommand -h to view help)')
def add_site_arg(this_parser):
this_parser.add_argument('site_directory', default='./fantail-site/',
nargs='?', help='Directory where the site is '
'stored. Defaults to %(default)s')
# Common arguments
parser.add_argument('-d', dest='debug', default=False, action='store_true',
help='Switch on verbose logging')
# fantail init
init_parser = subparsers.add_parser('init', description=cmd_init_site.__doc__)
add_site_arg(init_parser)
init_parser.set_defaults(func=cmd_init_site)
# fantail clean
clean_parser = subparsers.add_parser('clean', description=cmd_clean_site.__doc__)
clean_parser.add_argument('-y', dest='answer_yes', action='store_true',
help='Don\'t prompt for confirmation; assumes yes')
add_site_arg(clean_parser)
clean_parser.set_defaults(func=cmd_clean_site)
# fantail build
build_parser = subparsers.add_parser('build', description=cmd_build_site.__doc__)
add_site_arg(build_parser)
build_parser.set_defaults(func=cmd_build_site)
# If no subcommand was given, print help and exit
if override_args is None:
args = parser.parse_args()
else:
args = parser.parse_args(override_args)
if not hasattr(args, 'func'):
parser.print_help()
exit(1)
return args
def main(override_args=None):
args = parse_args(override_args=override_args)
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
format='%(levelname)s: %(module)s: %(message)s')
args.func(args)
if __name__ == '__main__':
main()
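# Editorial note (not part of the original file): example invocations, assuming
# the setuptools console script described in the module docstring is installed
# under the name `fantail`; the site path is illustrative.
#
#   fantail init ./my-site/      # create a new site with default templates
#   fantail build ./my-site/     # run the generator over the pages directory
#   fantail clean -y ./my-site/  # remove the output directory, assuming yes
#   fantail -d build             # verbose logging against ./fantail-site/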
| bsd-2-clause | 4,335,352,993,930,072,600 | 31.444444 | 104 | 0.644977 | false |
matroid/matroid-python | matroid/src/video_summary.py | 1 | 4766 | import requests
from matroid import error
from matroid.src.helpers import api_call
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-PostSummarize
@api_call(error.InvalidQueryError)
def create_video_summary(self, url=None, videoId=None, file=None):
"""Create an video summary with provided url or file"""
(endpoint, method) = self.endpoints['create_video_summary']
if not file and not url:
raise error.InvalidQueryError(
message='Missing required parameter: file or url')
if url and file:
raise error.InvalidQueryError(
message='You may only specify a file or a URL, not both')
try:
file_to_upload = None
headers = {'Authorization': self.token.authorization_header()}
data = {}
if file:
file_to_upload = self.filereader.get_file(file)
files = {'file': file_to_upload}
return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})
else:
data['url'] = url
if videoId:
data['videoId'] = videoId
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except IOError as e:
raise e
except error.InvalidQueryError as e:
raise e
except Exception as e:
raise error.APIConnectionError(message=e)
finally:
if file_to_upload:
file_to_upload.close()
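# Editorial usage sketch (not part of the original client code). How the API
# client object is constructed is outside this file; the client variable and
# the URL below are illustrative assumptions.
#
#   # client = <Matroid API client instance these methods are bound to>
#   # resp = client.create_video_summary(url='https://example.com/video.mp4')
#   # resp = client.create_video_summary(file='/tmp/video.mp4')  # or a local file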
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-GetSummariesSummaryid
@api_call(error.InvalidQueryError)
def get_video_summary(self, summaryId):
"""Fetch a video summary"""
(endpoint, method) = self.endpoints['get_video_summary']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-GetSummariesSummaryidTracksCsv
@api_call(error.InvalidQueryError)
def get_video_summary_tracks(self, summaryId):
"""Fetch a video summary track CSV"""
(endpoint, method) = self.endpoints['get_video_summary_tracks']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-GetSummariesSummaryidVideoMp4
@api_call(error.InvalidQueryError)
def get_video_summary_file(self, summaryId):
"""Fetch a video summary video file"""
(endpoint, method) = self.endpoints['get_video_summary_file']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-DeleteSummariesSummaryid
@api_call(error.InvalidQueryError)
def delete_video_summary(self, summaryId):
"""Delete a video summary"""
(endpoint, method) = self.endpoints['delete_video_summary']
endpoint = endpoint.replace(':summaryId', summaryId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-GetStreamsStreamidSummaries
@api_call(error.InvalidQueryError)
def get_stream_summaries(self, streamId):
"""Fetch all video summaries for a stream"""
(endpoint, method) = self.endpoints['get_stream_summaries']
endpoint = endpoint.replace(':streamId', streamId)
try:
headers = {'Authorization': self.token.authorization_header()}
return requests.request(method, endpoint, **{'headers': headers})
except Exception as e:
raise error.APIConnectionError(message=e)
# https://staging.dev.matroid.com/docs/api/index.html#api-Video_Summary-PostStreamsStreamidSummarize
@api_call(error.InvalidQueryError)
def create_stream_summary(self, streamId, startTime, endTime):
"""Create a video summary for a stream"""
(endpoint, method) = self.endpoints['create_stream_summary']
endpoint = endpoint.replace(':streamId', streamId)
try:
headers = {'Authorization': self.token.authorization_header()}
data = {
'startTime': startTime,
'endTime': endTime
}
return requests.request(method, endpoint, **{'headers': headers, 'data': data})
except Exception as e:
raise error.APIConnectionError(message=e)
| mit | 2,356,276,802,457,178,000 | 35.661538 | 102 | 0.722619 | false |
kennedyshead/home-assistant | homeassistant/components/analytics/const.py | 1 | 1442 | """Constants for the analytics integration."""
from datetime import timedelta
import logging
import voluptuous as vol
ANALYTICS_ENDPOINT_URL = "https://analytics-api.home-assistant.io/v1"
ANALYTICS_ENDPOINT_URL_DEV = "https://analytics-api-dev.home-assistant.io/v1"
DOMAIN = "analytics"
INTERVAL = timedelta(days=1)
STORAGE_KEY = "core.analytics"
STORAGE_VERSION = 1
LOGGER: logging.Logger = logging.getLogger(__package__)
ATTR_ADDON_COUNT = "addon_count"
ATTR_ADDONS = "addons"
ATTR_ARCH = "arch"
ATTR_AUTO_UPDATE = "auto_update"
ATTR_AUTOMATION_COUNT = "automation_count"
ATTR_BASE = "base"
ATTR_BOARD = "board"
ATTR_CUSTOM_INTEGRATIONS = "custom_integrations"
ATTR_DIAGNOSTICS = "diagnostics"
ATTR_HEALTHY = "healthy"
ATTR_INSTALLATION_TYPE = "installation_type"
ATTR_INTEGRATION_COUNT = "integration_count"
ATTR_INTEGRATIONS = "integrations"
ATTR_ONBOARDED = "onboarded"
ATTR_OPERATING_SYSTEM = "operating_system"
ATTR_PREFERENCES = "preferences"
ATTR_PROTECTED = "protected"
ATTR_SLUG = "slug"
ATTR_STATE_COUNT = "state_count"
ATTR_STATISTICS = "statistics"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPPORTED = "supported"
ATTR_USAGE = "usage"
ATTR_USER_COUNT = "user_count"
ATTR_UUID = "uuid"
ATTR_VERSION = "version"
PREFERENCE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_BASE): bool,
vol.Optional(ATTR_DIAGNOSTICS): bool,
vol.Optional(ATTR_STATISTICS): bool,
vol.Optional(ATTR_USAGE): bool,
}
)
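# Editorial sketch (not part of the original Home Assistant file): the constant
# above is a voluptuous schema, so a preferences dict is validated by calling it.
# The dict contents below are examples only.
#
#   # validated = PREFERENCE_SCHEMA({ATTR_BASE: True, ATTR_USAGE: False})
#   # PREFERENCE_SCHEMA({'unexpected': True})  # would raise vol.Invalid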
| apache-2.0 | -1,498,860,972,275,028,000 | 26.730769 | 77 | 0.730929 | false |
drayanaindra/monit-isp | monit_my_isp.py | 1 | 1933 | import os
import re
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
# set your directory for log or anything when you wanna say fuck
SET_DIR = os.path.dirname(os.path.abspath(' '))
NAME_FILE = 'log-myisp.txt' # custom your file name
FULL_PATH_DIR = SET_DIR + '/' + NAME_FILE
MIN_DOWN = 256 # in Kbps
# setup smtp mail
# confirm activation application in here https://accounts.google.com/b/0/DisplayUnlockCaptcha
# turn on Less secure apps in here https://www.google.com/settings/security/lesssecureapps
MAIL_ADDRESS = '' # your mail
MAIL_TARGET = '' # your target mail
PASSWORD_EMAIL = '' # your password mail
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
print 'login mail'
server.login(MAIL_ADDRESS, PASSWORD_EMAIL)
# setup email
msg = MIMEMultipart()
msg['From'] = MAIL_ADDRESS
msg['To'] = MAIL_TARGET
msg['Subject'] = "[REPORT] PT. Angsa Maen Kuda"
body = """
Hello bro,
This is my automatic report, sent when the download rate is poor.
Regards,
Saitama
"""
print 'Do Speedtest'
create_log = os.system('speedtest-cli --share > {isp}'.format(isp=FULL_PATH_DIR))
with open(NAME_FILE, "r") as mylog:
string_log = mylog.read().replace('\n', '')
get_down_bit = re.search('Download: (.+?) Mbit', string_log)
to_int_bit = float(get_down_bit.group(1))
# conver to kbps
convert_to_kbps = to_int_bit * 1024
if convert_to_kbps <= MIN_DOWN:
print 'send mail'
attachment = open(FULL_PATH_DIR, "rb")
msg.attach(MIMEText(body, 'plain'))
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % NAME_FILE)
msg.attach(part)
text = msg.as_string()
server.sendmail(MAIL_ADDRESS, MAIL_TARGET, text)
server.quit()
| gpl-3.0 | -6,799,284,218,235,311,000 | 28.287879 | 93 | 0.700466 | false |
C2SM/hymet_idealized | runsims_user.py | 1 | 1464 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2013-2014 Steven Boeing, ETHZ
# script for running a set of cases
# requires cip!
import glob
import os
import getpass
myusername=getpass.getuser()
headdir='/users/'+myusername+'/csim/fromtempl'
mycases=['bomex']
myconfs=['c1']
expglob='20150112exp_000' # which experiments to select
def intersect(a, b):
return list(set(a) & set(b))
for case in mycases:
for conf in myconfs:
# find underlying experiments
curdir=headdir+'/'+case+'/'+conf+'/'
exps=glob.glob(curdir+expglob)
subdirs=[curdir+ i for i in os.walk(curdir).next()[1]]
# make sure experiment corresponds to actual working case
for exper in intersect(exps,subdirs):
os.chdir(exper)
os.system('cip clean')
os.system('cip start')
| lgpl-3.0 | -2,831,505,908,376,665,600 | 33.857143 | 77 | 0.696721 | false |
aequitas/home-assistant | homeassistant/components/locative/device_tracker.py | 1 | 2692 | """Support for the Locative platform."""
import logging
from homeassistant.core import callback
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import (
DeviceTrackerEntity
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as LT_DOMAIN, TRACKER_UPDATE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Configure a dispatcher connection based on a config entry."""
@callback
def _receive_data(device, location, location_name):
"""Receive set location."""
if device in hass.data[LT_DOMAIN]['devices']:
return
hass.data[LT_DOMAIN]['devices'].add(device)
async_add_entities([LocativeEntity(
device, location, location_name
)])
hass.data[LT_DOMAIN]['unsub_device_tracker'][entry.entry_id] = \
async_dispatcher_connect(hass, TRACKER_UPDATE, _receive_data)
return True
class LocativeEntity(DeviceTrackerEntity):
"""Represent a tracked device."""
def __init__(self, device, location, location_name):
"""Set up Locative entity."""
self._name = device
self._location = location
self._location_name = location_name
self._unsub_dispatcher = None
@property
def latitude(self):
"""Return latitude value of the device."""
return self._location[0]
@property
def longitude(self):
"""Return longitude value of the device."""
return self._location[1]
@property
def location_name(self):
"""Return a location name for the current location of the device."""
return self._location_name
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
async def async_added_to_hass(self):
"""Register state update callback."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, TRACKER_UPDATE, self._async_receive_data)
async def async_will_remove_from_hass(self):
"""Clean up after entity before removal."""
self._unsub_dispatcher()
@callback
def _async_receive_data(self, device, location, location_name):
"""Update device data."""
self._location_name = location_name
self._location = location
self.async_write_ha_state()
| apache-2.0 | 2,563,296,989,840,998,400 | 28.911111 | 76 | 0.647103 | false |
baifendian/harpc | admin/manage/customdecorators.py | 1 | 1412 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015 Baifendian Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.utils.six import wraps
from django.utils.decorators import available_attrs
from manage.lib.zk_harpc import ZK_HARPC
__author__ = 'caojingwei'
def auto_flush_cache(type='auto'):
"""
自动刷新缓存
"""
actual_decorator = flush_cache(
lambda u: u.is_authenticated(),
type=type,
)
return actual_decorator
def flush_cache(test_func, type):
"""
刷新缓存
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
            # Automatically flush the cache for logged-in users
if test_func(request.user):
ZK_HARPC.flush_cache(type=type)
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator | apache-2.0 | 2,894,816,251,340,241,400 | 25.403846 | 72 | 0.680758 | false |
opena11y/fae2 | fae2/fae2/context_processors.py | 1 | 2101 | """
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: fae2/context_processors.py
Author: Jon Gunderson
"""
from __future__ import absolute_import
from django.contrib.sites.models import Site
from userProfiles.models import UserProfile
from django.contrib.auth.models import AnonymousUser
from fae2.settings import ANONYMOUS_ENABLED
from fae2.settings import SELF_REGISTRATION_ENABLED
from fae2.settings import PAID_SUBSCRIPTION_ENABLED
from fae2.settings import SHIBBOLETH_ENABLED
from fae2.settings import SHIBBOLETH_URL
from fae2.settings import SHIBBOLETH_NAME
from fae2.settings import PAYMENT_ENABLED
def site(request):
return {
'site': Site.objects.get_current()
}
def anonymous(request):
return {
'anonymous_enabled': ANONYMOUS_ENABLED
}
def self_registration(request):
return {
'self_registration_enabled': SELF_REGISTRATION_ENABLED
}
def paid_subscription(request):
return {
'paid_subscription_enabled': PAID_SUBSCRIPTION_ENABLED
}
def shibboleth(request):
return {
'shibboleth': { 'enabled' : SHIBBOLETH_ENABLED,
'url' : SHIBBOLETH_URL,
'name' : SHIBBOLETH_NAME
}
}
def payment_enabled(request):
return {
'payment_enabled': PAYMENT_ENABLED
}
def user_profile(request):
if request.user.id:
user_profile = UserProfile.objects.get(user=request.user)
else:
user_profile = False
if not user_profile or user_profile.user.username == 'anonymous':
user_profile = False
return {
'user_profile' : user_profile
}
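# Editorial note (not part of the original file): processors such as these are
# activated by listing their dotted paths in Django's TEMPLATES setting. The
# module path below is inferred from this file's location and may differ.
#
#   # TEMPLATES[0]['OPTIONS']['context_processors'] += [
#   #     'fae2.context_processors.site',
#   #     'fae2.context_processors.anonymous',
#   #     'fae2.context_processors.user_profile',
#   # ]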
| apache-2.0 | 8,637,134,552,017,623,000 | 23.717647 | 72 | 0.729177 | false |
lifemapper/LmQGIS | lifemapperTools/tools/ui_listExperimentDialog.py | 1 | 7554 | # -*- coding: utf-8 -*-
"""
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import operator
#from lifemapperTools.common.pluginconstants import KUNHM_GC_DESCRIBE_PROCESS_URL, \
# STATUS_ACCEPTED, STATUS_FAILED, STATUS_SUCCEEDED, STATUS_STARTED, FIND_STATUS
class Ui_Dialog(object):
def setupUi(self):
self.setObjectName("Dialog")
self.resize(725, 548)
self.setMinimumSize(795,548)
self.setMaximumSize(1695,1548)
self.setSizeGripEnabled(True)
self.gridLayout = QtGui.QGridLayout(self)
self.gridLayout.setObjectName("gridLayout")
self.inputGroup = QtGui.QGroupBox(self)
self.inputGroup.setObjectName("inputGroup")
self.style = QtGui.QStyleFactory.create("motif") # try plastique too!
self.inputGroup.setStyle(self.style)
self.gridLayout_input = QtGui.QGridLayout(self.inputGroup)
self.gridLayout_input.setObjectName("gridLayout_input")
# put some spacers in gridLayout_input
self.gridLayout_input.setColumnMinimumWidth(0,33)
self.gridLayout_input.setColumnMinimumWidth(1,370)
self.gridLayout_input.setColumnMinimumWidth(2,33)
self.gridLayout_input.setRowMinimumHeight(0,60)
self.gridLayout_input.setRowMinimumHeight(1,305)
self.gridLayout_input.setRowMinimumHeight(2,20)
self.gridLayout_input.setRowMinimumHeight(3,30)
self.gridLayout_input.setRowMinimumHeight(4,30)
############ end spacers ################
# radio Layout container
self.gridLayout_radio1 = QtGui.QHBoxLayout()
self.gridLayout_radio2 = QtGui.QHBoxLayout()
self.gridLayout_input.addLayout(self.gridLayout_radio1,3,0,1,3)
self.gridLayout_input.addLayout(self.gridLayout_radio2,4,0,1,3)
###########################################
self.gridLayout.addWidget(self.inputGroup, 4,0,4,0)
self.gridLayout.setRowStretch(4,6)
########### output group ####################
self.outputGroup = QtGui.QGroupBox(self)
self.outputGroup.setObjectName("outputGroup")
self.style2 = QtGui.QStyleFactory.create("motif") # try plastique too!
self.outputGroup.setStyle(self.style2)
self.gridLayout_output = QtGui.QGridLayout(self.outputGroup)
self.gridLayout_output.setObjectName("gridLayout_output")
self.gridLayout.addWidget(self.outputGroup, 4,0,4,0)
self.gridLayout.setRowStretch(4,6)
self.statuslabel = QtGui.QLabel(self.outputGroup)
self.statuslabel.setObjectName('status')
self.gridLayout_output.addWidget(self.statuslabel)
self.statuslabel.setText(QtGui.QApplication.translate("self",
'Running Process', None, QtGui.QApplication.UnicodeUTF8))
self.progressbar = QtGui.QProgressBar(self.outputGroup)
self.progressbar.setMinimum(0)
self.progressbar.setMaximum(100)
self.progressbar.setObjectName('progressbar')
self.gridLayout_output.addWidget(self.progressbar)
self.outputGroup.hide()
################## radio buttons ################
self.openProj = QtGui.QPushButton('View Experiment')
self.openProj.setToolTip("Open any layers loaded into the project")
self.gridLayout_radio1.addWidget(self.openProj)
self.openProj.setDefault(True)
#QtCore.QObject.connect(self.openProj, QtCore.SIGNAL("clicked()"), lambda: self.accept('openProj'))
self.openProj.clicked.connect(lambda: self.accept('openProj'))
self.viewBuckets = QtGui.QPushButton('Get Grids')
self.viewBuckets.setToolTip('Get Grids')
#self.viewBuckets.setChecked(True)
self.viewBuckets.setObjectName("viewBuckets")
self.gridLayout_radio1.addWidget(self.viewBuckets)
#QtCore.QObject.connect(self.viewBuckets, QtCore.SIGNAL("clicked()"), lambda: self.accept('buckets'))
self.viewBuckets.clicked.connect(lambda: self.accept('buckets'))
self.viewLayers = QtGui.QPushButton('Get PA layers')
self.viewLayers.setToolTip("Get Presence Absence Layers")
#self.viewLayers.setChecked(True)
self.viewLayers.setObjectName("viewLayers")
self.gridLayout_radio1.addWidget(self.viewLayers)
#QtCore.QObject.connect(self.viewLayers, QtCore.SIGNAL("clicked()"), lambda: self.accept('viewLyrs'))
self.viewLayers.clicked.connect(lambda: self.accept('viewLyrs'))
self.addLayer = QtGui.QPushButton('Add PA layers')
self.addLayer.setToolTip('Add Presence Absence Layers')
#self.addLayer.setChecked(False)
self.addLayer.setObjectName("addlayer")
self.gridLayout_radio1.addWidget(self.addLayer)
#QtCore.QObject.connect(self.addLayer, QtCore.SIGNAL("clicked()"), lambda: self.accept('addLyrs'))
self.addLayer.clicked.connect(lambda: self.accept('addLyrs'))
self.addSDMLayer = QtGui.QPushButton('Add LM modeled species layer')
self.addSDMLayer.setToolTip('Add Lifemapper modeled species distribution')
#self.addSDMLayer.setChecked(False)
self.addSDMLayer.setObjectName("addSDMlayer")
self.gridLayout_radio1.addWidget(self.addSDMLayer)
#QtCore.QObject.connect(self.addSDMLayer, QtCore.SIGNAL("clicked()"), lambda: self.accept('addSDM'))
self.addSDMLayer.clicked.connect(lambda: self.accept('addSDM'))
self.rejectBut = QtGui.QPushButton("Close",self)
#
self.helpBut = QtGui.QPushButton("?",self)
self.helpBut.setMaximumSize(30, 30)
self.helpBut.clicked.connect(self.help)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setObjectName("buttonBox")
self.buttonBox.addButton(self.helpBut, QtGui.QDialogButtonBox.ActionRole)
self.buttonBox.addButton(self.rejectBut, QtGui.QDialogButtonBox.ActionRole)
self.gridLayout.addWidget(self.buttonBox, 8, 0 ,7, 3)
self.rejectBut.clicked.connect(self.reject)
self.retranslateUi()
def retranslateUi(self):
self.setWindowTitle(QtGui.QApplication.translate("self",
"Range and Diversity Experiments", None, QtGui.QApplication.UnicodeUTF8))
#...............................................................................
#...............................................................................
| gpl-2.0 | 2,559,751,720,454,974,000 | 41.438202 | 107 | 0.658459 | false |
rsennrich/nematus | nematus/theano_tf_convert.py | 1 | 11403 | #!/usr/bin/env python3
import argparse
import logging
import os
import sys
import numpy as np
import tensorflow as tf
from config import load_config_from_json_file
import model_loader
import rnn_model
def construct_parameter_map(config):
def drt_tag(i):
return "" if i == 0 else "_drt_{0}".format(i)
def add_gru_variables(param_map, th_prefix, tf_prefix, drt_tag,
alt_names=False):
for th_roots, tf_root in [[["U", "U_nl"], "state_to_gates"],
[["Ux", "Ux_nl"], "state_to_proposal"],
[["W", "Wc"], "input_to_gates"],
[["Wx", "Wcx"], "input_to_proposal"],
[["b", "b_nl"], "gates_bias"],
[["bx", "bx_nl"], "proposal_bias"]]:
th_root = th_roots[1] if alt_names else th_roots[0]
if drt_tag != "" and th_root.startswith("W"):
# For deep transition, only the bottom GRU has external inputs.
continue
key = "{0}{1}{2}".format(th_prefix, th_root, drt_tag)
val = "{0}{1}:0".format(tf_prefix, tf_root)
param_map[key] = val
for th_roots, tf_root in [[["U", "U_nl"], "gates_state_norm"],
[["Ux", "Ux_nl"], "proposal_state_norm"],
[["W", "Wc"], "gates_x_norm"],
[["Wx", "Wcx"], "proposal_x_norm"]]:
th_root = th_roots[1] if alt_names else th_roots[0]
if drt_tag != "" and th_root.startswith("W"):
# For deep transition, only the bottom GRU has external inputs.
continue
key = "{0}{1}{2}_lnb".format(th_prefix, th_root, drt_tag)
val = "{0}{1}/new_mean:0".format(tf_prefix, tf_root)
param_map[key] = val
key = "{0}{1}{2}_lns".format(th_prefix, th_root, drt_tag)
val = "{0}{1}/new_std:0".format(tf_prefix, tf_root)
param_map[key] = val
th2tf = {
# encoder/embedding
'Wemb' : 'encoder/embedding/embeddings:0',
# decoder/initial_state_constructor
'ff_state_W' : 'decoder/initial_state_constructor/W:0',
'ff_state_b' : 'decoder/initial_state_constructor/b:0',
'ff_state_ln_b' : 'decoder/initial_state_constructor/new_mean:0',
'ff_state_ln_s' : 'decoder/initial_state_constructor/new_std:0',
# decoder/embedding
'Wemb_dec' : 'decoder/embedding/embeddings:0',
# decoder/base/attention
'decoder_U_att' : 'decoder/base/attention/hidden_to_score:0',
'decoder_W_comb_att' : 'decoder/base/attention/state_to_hidden:0',
'decoder_W_comb_att_lnb' : 'decoder/base/attention/hidden_state_norm/new_mean:0',
'decoder_W_comb_att_lns' : 'decoder/base/attention/hidden_state_norm/new_std:0',
'decoder_Wc_att' : 'decoder/base/attention/context_to_hidden:0',
'decoder_Wc_att_lnb' : 'decoder/base/attention/hidden_context_norm/new_mean:0',
'decoder_Wc_att_lns' : 'decoder/base/attention/hidden_context_norm/new_std:0',
'decoder_b_att' : 'decoder/base/attention/hidden_bias:0',
# decoder/next_word_predictor
'ff_logit_W' : 'decoder/next_word_predictor/hidden_to_logits/W:0',
'ff_logit_b' : 'decoder/next_word_predictor/hidden_to_logits/b:0',
'ff_logit_ctx_W' : 'decoder/next_word_predictor/attended_context_to_hidden/W:0',
'ff_logit_ctx_b' : 'decoder/next_word_predictor/attended_context_to_hidden/b:0',
'ff_logit_ctx_ln_b' : 'decoder/next_word_predictor/attended_context_to_hidden/new_mean:0',
'ff_logit_ctx_ln_s' : 'decoder/next_word_predictor/attended_context_to_hidden/new_std:0',
'ff_logit_lstm_W' : 'decoder/next_word_predictor/state_to_hidden/W:0',
'ff_logit_lstm_b' : 'decoder/next_word_predictor/state_to_hidden/b:0',
'ff_logit_lstm_ln_b' : 'decoder/next_word_predictor/state_to_hidden/new_mean:0',
'ff_logit_lstm_ln_s' : 'decoder/next_word_predictor/state_to_hidden/new_std:0',
'ff_logit_prev_W' : 'decoder/next_word_predictor/prev_emb_to_hidden/W:0',
'ff_logit_prev_b' : 'decoder/next_word_predictor/prev_emb_to_hidden/b:0',
'ff_logit_prev_ln_b' : 'decoder/next_word_predictor/prev_emb_to_hidden/new_mean:0',
'ff_logit_prev_ln_s' : 'decoder/next_word_predictor/prev_emb_to_hidden/new_std:0',
# other
'decoder_c_tt' : None,
'history_errs' : None,
'uidx' : 'time:0'}
# Add embedding variables for any additional factors.
for i in range(1, len(config.dim_per_factor)):
th_name = 'Wemb{0}'.format(i)
th2tf[th_name] = 'encoder/embedding/embeddings_{0}:0'.format(i)
# Add GRU variables for the encoder.
for i in range(config.rnn_enc_depth):
for j in range(config.rnn_enc_transition_depth):
th_prefix_f = "encoder_" + ("" if i == 0 else "{0}_".format(i+1))
tf_prefix_f = "encoder/forward-stack/level{0}/gru{1}/".format(i, j)
th_prefix_b = "encoder_r_" + ("" if i == 0 else "{0}_".format(i+1))
tf_prefix_b = "encoder/backward-stack/level{0}/gru{1}/".format(i, j)
if i % 2:
# The variable naming convention differs between the Theano and
# Tensorflow versions: in the Theano version, encoder_<i> is
# used for the i-th left-to-right encoder GRU, and encoder_r_<i>
# is used for the i-th right-to-left one. In the Tensorflow
# version, forward-stack/level0 is left-to-right and
# backward-stack/level0 is right-to-left, but then the
# directions alternate up the stack. Flipping the th_prefixes
# will map the GRU variables accordingly.
th_prefix_f, th_prefix_b = th_prefix_b, th_prefix_f
add_gru_variables(th2tf, th_prefix_f, tf_prefix_f, drt_tag(j))
add_gru_variables(th2tf, th_prefix_b, tf_prefix_b, drt_tag(j))
# Add GRU variables for the base level of the decoder.
add_gru_variables(th2tf, "decoder_", "decoder/base/gru0/", "")
for j in range(1, config.rnn_dec_base_transition_depth):
tf_prefix = "decoder/base/gru{0}/".format(j)
add_gru_variables(th2tf, "decoder_", tf_prefix, drt_tag(j-1),
alt_names=True)
# Add GRU variables for the high levels of the decoder.
for i in range(config.rnn_dec_depth-1):
for j in range(config.rnn_dec_high_transition_depth):
th_prefix = "decoder_{0}_".format(i+2)
tf_prefix = "decoder/high/level{0}/gru{1}/".format(i, j)
add_gru_variables(th2tf, th_prefix, tf_prefix, drt_tag(j))
return th2tf
def theano_to_tensorflow_config(model_path):
config = load_config_from_json_file(model_path)
setattr(config, 'reload', None)
setattr(config, 'prior_model', None)
return config
def theano_to_tensorflow_model(in_path, out_path):
saved_model = np.load(in_path)
config = theano_to_tensorflow_config(in_path)
th2tf = construct_parameter_map(config)
with tf.Session() as sess:
logging.info('Building model...')
model = rnn_model.RNNModel(config)
init = tf.zeros_initializer(dtype=tf.int32)
global_step = tf.get_variable('time', [], initializer=init, trainable=False)
saver = model_loader.init_or_restore_variables(config, sess)
seen = set()
assign_ops = []
for th_name in list(saved_model.keys()):
# ignore adam parameters
if th_name.startswith('adam'):
continue
tf_name = th2tf[th_name]
if tf_name is None:
logging.info("Not saving {} because no TF " \
"equivalent".format(th_name))
continue
assert tf_name not in seen
seen.add(tf_name)
tf_var = tf.get_default_graph().get_tensor_by_name(tf_name)
tf_shape = sess.run(tf.shape(tf_var))
th_var = saved_model[th_name]
th_shape = th_var.shape
if list(tf_shape) != list(th_shape):
logging.error("Shapes do not match for {} and " \
"{}.".format(tf_name, th_name))
logging.error("Shape of {} is {}".format(tf_name, tf_shape))
logging.error("Shape of {} is {}".format(th_name, th_shape))
sys.exit(1)
assign_ops.append(tf.assign(tf_var, th_var))
sess.run(assign_ops)
saver.save(sess, save_path=out_path)
unassigned = []
for tf_var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
if tf_var.name not in seen:
unassigned.append(tf_var.name)
logging.info("The following TF variables were not " \
"assigned: {}".format(" ".join(unassigned)))
logging.info("You should see only the 'time' variable listed")
def tensorflow_to_theano_model(in_path, out_path):
config = load_config_from_json_file(in_path)
th2tf = construct_parameter_map(config)
keys, values = list(zip(*list(th2tf.items())))
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph(in_path + '.meta')
new_saver.restore(sess, in_path)
params = {}
for th_name, tf_name in list(th2tf.items()):
if tf_name is not None:
try:
v = sess.run(tf.get_default_graph().get_tensor_by_name(tf_name))
except:
logging.info("Skipping {} because it was not " \
"found".format(tf_name))
continue
else:
if th_name == 'history_errs':
v = []
elif th_name == 'decoder_c_tt':
v = np.zeros(1, dtype=np.float32)
else:
assert False, 'Need to handle {}'.format(th_name)
assert th_name not in params, '{} is repeated!'.format(th_name)
params[th_name] = v
np.savez(out_path, **params)
logging.info('Saved {} params to {}'.format(len(params), out_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--from_theano', action='store_true',
help="convert from Theano to TensorFlow format")
group.add_argument('--from_tf', action='store_true',
help="convert from Tensorflow to Theano format")
parser.add_argument('--in', type=str, required=True, metavar='PATH',
dest='inn', help="path to input model")
parser.add_argument('--out', type=str, required=True, metavar='PATH',
help="path to output model")
opts = parser.parse_args()
opts.inn = os.path.abspath(opts.inn)
opts.out = os.path.abspath(opts.out)
# Start logging.
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
if opts.from_theano:
theano_to_tensorflow_model(opts.inn, opts.out)
elif opts.from_tf:
tensorflow_to_theano_model(opts.inn, opts.out)
else:
assert False
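    # Example invocations (editor's note, based only on the argparse options
    # defined above; the file names are placeholders):
    #   python theano_tf_convert.py --from_theano --in model.npz --out tf_model/model
    #   python theano_tf_convert.py --from_tf --in tf_model/model --out model.npz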
| bsd-3-clause | 7,879,858,053,954,814,000 | 45.925926 | 98 | 0.565553 | false |
tensorflow/tfx | tfx/experimental/templates/taxi/e2e_tests/local_e2e_test.py | 1 | 3196 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""E2E test using Beam orchestrator for taxi template."""
import os
import subprocess
import sys
import unittest
from absl import logging
import tensorflow as tf
from tfx.experimental.templates import test_utils
@unittest.skipIf(tf.__version__ < '2',
'Uses keras Model only compatible with TF 2.x')
class TaxiTemplateLocalEndToEndTest(test_utils.BaseEndToEndTest):
"""This test covers step 1~6 of the accompanying document[1] for taxi template.
[1]https://github.com/tensorflow/tfx/blob/master/docs/tutorials/tfx/template.ipynb
"""
def _getAllUnitTests(self):
for root, _, files in os.walk(self._project_dir):
base_dir = os.path.relpath(root, self._project_dir)
if base_dir == '.': # project_dir == root
base_module = ''
else:
base_module = base_dir.replace(os.path.sep, '.') + '.'
for filename in files:
if filename.endswith('_test.py'):
yield base_module + filename[:-3]
def testGeneratedUnitTests(self):
self._copyTemplate('taxi')
for m in self._getAllUnitTests():
logging.info('Running unit test "%s"', m)
# A failed googletest will raise a CalledProcessError.
_ = subprocess.check_output([sys.executable, '-m', m])
def testLocalPipeline(self):
self._copyTemplate('taxi')
os.environ['LOCAL_HOME'] = os.path.join(self._temp_dir, 'local')
# Create a pipeline with only one component.
result = self._runCli([
'pipeline',
'create',
'--engine',
'local',
'--pipeline_path',
'local_runner.py',
])
self.assertIn(
'Pipeline "{}" created successfully.'.format(self._pipeline_name),
result)
# Run the pipeline.
self._runCli([
'run',
'create',
'--engine',
'local',
'--pipeline_name',
self._pipeline_name,
])
# Update the pipeline to include all components.
updated_pipeline_file = self._addAllComponents()
logging.info('Updated %s to add all components to the pipeline.',
updated_pipeline_file)
result = self._runCli([
'pipeline',
'update',
'--engine',
'local',
'--pipeline_path',
'local_runner.py',
])
self.assertIn(
'Pipeline "{}" updated successfully.'.format(self._pipeline_name),
result)
# Run the updated pipeline.
self._runCli([
'run',
'create',
'--engine',
'local',
'--pipeline_name',
self._pipeline_name,
])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 4,898,479,839,400,769,000 | 28.321101 | 84 | 0.624531 | false |
ocadotechnology/gcp-census | tests/bigquery_table_metadata_test.py | 1 | 4286 | import unittest
from gcp_census.bigquery.bigquery_table_metadata import BigQueryTableMetadata
class TestBigQueryTableMetadata(unittest.TestCase):
def test_is_daily_partitioned_should_return_False_if_is_a_partition(self):
# given
big_query_table_metadata = BigQueryTableMetadata(
{"tableReference":
{"tableId": "tableName$20170324"},
"timePartitioning": {"type": "DAY"}
}
)
# when
result = big_query_table_metadata.is_daily_partitioned()
# then
self.assertEqual(False, result)
def test_is_daily_partitioned_should_return_False_if_there_is_no_partitioning_field(self):
# given
big_query_table_metadata = BigQueryTableMetadata({})
# when
result = big_query_table_metadata.is_daily_partitioned()
# then
self.assertEqual(False, result)
def test_is_daily_partitioned_should_return_True_if_there_is_DAY_type_in_timePartitioning_field(
self
):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {"tableId": "tableName"},
"timePartitioning": {"type": "DAY"}
})
# when
result = big_query_table_metadata.is_daily_partitioned()
# then
self.assertEqual(True, result)
def test_is_daily_partitioned_should_return_False_if_no_metadata_for_table(
self
):
# given
big_query_table_metadata = BigQueryTableMetadata(None)
# when
result = big_query_table_metadata.is_daily_partitioned()
# then
self.assertEqual(False, result)
def test_is_partition_should_return_true_for_partition(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1$20171002"
}
})
# when
result = big_query_table_metadata.is_partition()
# then
self.assertEqual(True, result)
def test_is_partition_should_return_false_for_table(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1"
}
})
# when
result = big_query_table_metadata.is_partition()
# then
self.assertEqual(False, result)
def test_get_partition_id_should_return_partition_id(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1$20171002"
}
})
# when
result = big_query_table_metadata.get_partition_id()
# then
self.assertEqual("20171002", result)
def test_get_partition_id_should_raise_exception_for_tables(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1"
}
})
# when then
with self.assertRaises(AssertionError):
big_query_table_metadata.get_partition_id()
def test_get_table_id_should_return_table_id_when_partition(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1$20171002"
}
})
# when
result = big_query_table_metadata.get_table_id()
# then
self.assertEqual("t1", result)
def test_get_table_id_should_return_table_id_when_not_partitioned(self):
# given
big_query_table_metadata = BigQueryTableMetadata({
"tableReference": {
"projectId": "p1",
"datasetId": "d1",
"tableId": "t1"
}
})
# when
result = big_query_table_metadata.get_table_id()
# then
self.assertEqual("t1", result)
| apache-2.0 | 8,976,545,287,990,609,000 | 31.225564 | 100 | 0.54923 | false |
isaacj87/unity | tests/autopilot/autopilot/tests/test_ibus.py | 1 | 7160 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
# Author: Thomi Richards, Martin Mrazik
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
"""Tests to ensure unity is compatible with ibus input method."""
from testtools.matchers import Equals, NotEquals
from autopilot.emulators.ibus import (
set_active_engines,
get_available_input_engines,
)
from autopilot.matchers import Eventually
from autopilot.tests import AutopilotTestCase, multiply_scenarios
class IBusTests(AutopilotTestCase):
"""Base class for IBus tests."""
def setUp(self):
super(IBusTests, self).setUp()
def tearDown(self):
super(IBusTests, self).tearDown()
@classmethod
def setUpClass(cls):
cls._old_engines = None
cls.activate_input_engine_or_skip(cls.engine_name)
@classmethod
def tearDownClass(cls):
if cls._old_engines is not None:
set_active_engines(cls._old_engines)
@classmethod
def activate_input_engine_or_skip(cls, engine_name):
available_engines = get_available_input_engines()
if engine_name in available_engines:
cls._old_engines = set_active_engines([engine_name])
else:
raise AutopilotTestCase.skipException("This test requires the '%s' engine to be installed." % (engine_name))
    def activate_ibus(self, widget):
        """Activate IBus, and wait till it's activated on 'widget'"""
self.assertThat(widget.im_active, Equals(False))
self.keyboard.press_and_release('Ctrl+Space', 0.05)
self.assertThat(widget.im_active, Eventually(Equals(True)))
def deactivate_ibus(self, widget):
"""Deactivate ibus, and wait till it's inactive on 'widget'"""
self.assertThat(widget.im_active, Equals(True))
self.keyboard.press_and_release('Ctrl+Space', 0.05)
self.assertThat(widget.im_active, Eventually(Equals(False)))
def do_dash_test_with_engine(self):
self.dash.ensure_visible()
self.addCleanup(self.dash.ensure_hidden)
self.activate_ibus(self.dash.searchbar)
self.keyboard.type(self.input)
commit_key = getattr(self, 'commit_key', None)
if commit_key:
self.keyboard.press_and_release(commit_key)
self.deactivate_ibus(self.dash.searchbar)
self.assertThat(self.dash.search_string, Eventually(Equals(self.result)))
def do_hud_test_with_engine(self):
self.hud.ensure_visible()
self.addCleanup(self.hud.ensure_hidden)
self.activate_ibus(self.hud.searchbar)
self.keyboard.type(self.input)
commit_key = getattr(self, 'commit_key', None)
if commit_key:
self.keyboard.press_and_release(commit_key)
self.deactivate_ibus(self.hud.searchbar)
self.assertThat(self.hud.search_string, Eventually(Equals(self.result)))
class IBusTestsPinyin(IBusTests):
"""Tests for the Pinyin(Chinese) input engine."""
engine_name = "pinyin"
scenarios = [
('basic', {'input': 'abc1', 'result': u'\u963f\u5e03\u4ece'}),
('photo', {'input': 'zhaopian ', 'result': u'\u7167\u7247'}),
('internet', {'input': 'hulianwang ', 'result': u'\u4e92\u8054\u7f51'}),
('disk', {'input': 'cipan ', 'result': u'\u78c1\u76d8'}),
('disk_management', {'input': 'cipan guanli ', 'result': u'\u78c1\u76d8\u7ba1\u7406'}),
]
def test_simple_input_dash(self):
self.do_dash_test_with_engine()
def test_simple_input_hud(self):
self.do_hud_test_with_engine()
class IBusTestsHangul(IBusTests):
"""Tests for the Hangul(Korean) input engine."""
engine_name = "hangul"
scenarios = [
('transmission', {'input': 'xmfostmaltus ', 'result': u'\ud2b8\ub79c\uc2a4\ubbf8\uc158 '}),
('social', {'input': 'httuf ', 'result': u'\uc18c\uc15c '}),
('document', {'input': 'anstj ', 'result': u'\ubb38\uc11c '}),
]
def test_simple_input_dash(self):
self.do_dash_test_with_engine()
def test_simple_input_hud(self):
self.do_hud_test_with_engine()
class IBusTestsAnthy(IBusTests):
"""Tests for the Anthy(Japanese) input engine."""
engine_name = "anthy"
scenarios = multiply_scenarios(
[
('system', {'input': 'shisutemu ', 'result': u'\u30b7\u30b9\u30c6\u30e0'}),
('game', {'input': 'ge-mu ', 'result': u'\u30b2\u30fc\u30e0'}),
('user', {'input': 'yu-za- ', 'result': u'\u30e6\u30fc\u30b6\u30fc'}),
],
[
('commit_j', {'commit_key': 'Ctrl+j'}),
('commit_enter', {'commit_key': 'Enter'}),
]
)
def test_simple_input_dash(self):
self.do_dash_test_with_engine()
def test_simple_input_hud(self):
self.do_hud_test_with_engine()
class IBusTestsPinyinIgnore(IBusTests):
"""Tests for ignoring key events while the Pinyin input engine is active."""
engine_name = "pinyin"
def test_ignore_key_events_on_dash(self):
self.dash.ensure_visible()
self.addCleanup(self.dash.ensure_hidden)
self.activate_ibus(self.dash.searchbar)
self.keyboard.type("cipan")
self.keyboard.press_and_release("Tab")
self.keyboard.type(" ")
self.deactivate_ibus(self.dash.searchbar)
self.assertThat(self.dash.search_string, Eventually(NotEquals(" ")))
def test_ignore_key_events_on_hud(self):
self.hud.ensure_visible()
self.addCleanup(self.hud.ensure_hidden)
self.keyboard.type("a")
self.activate_ibus(self.hud.searchbar)
self.keyboard.type("riqi")
old_selected = self.hud.selected_button
self.keyboard.press_and_release("Down")
new_selected = self.hud.selected_button
self.deactivate_ibus(self.hud.searchbar)
self.assertEqual(old_selected, new_selected)
class IBusTestsAnthyIgnore(IBusTests):
"""Tests for ignoring key events while the Anthy input engine is active."""
engine_name = "anthy"
def test_ignore_key_events_on_dash(self):
self.dash.ensure_visible()
self.addCleanup(self.dash.ensure_hidden)
self.activate_ibus(self.dash.searchbar)
self.keyboard.type("shisutemu ")
self.keyboard.press_and_release("Tab")
self.keyboard.press_and_release("Ctrl+j")
self.deactivate_ibus(self.dash.searchbar)
dash_search_string = self.dash.search_string
self.assertNotEqual("", dash_search_string)
def test_ignore_key_events_on_hud(self):
self.hud.ensure_visible()
self.addCleanup(self.hud.ensure_hidden)
self.keyboard.type("a")
self.activate_ibus(self.hud.searchbar)
self.keyboard.type("hiduke")
old_selected = self.hud.selected_button
self.keyboard.press_and_release("Down")
new_selected = self.hud.selected_button
self.deactivate_ibus(self.hud.searchbar)
self.assertEqual(old_selected, new_selected)
| gpl-3.0 | 1,277,520,508,238,889,500 | 34.270936 | 120 | 0.639385 | false |
knightdf/tiantian | application/log/logger.py | 1 | 1198 | # coding=utf-8
import logging
from logging.handlers import SMTPHandler
from handlers import MultiProcessTimedRotatingFileHandler
from application import config
_Levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARN': logging.WARN,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
log = logging.getLogger('wechat')
log.setLevel(_Levels.get(str.upper(config.LogLevel or ''), logging.NOTSET))
log.propagate = False
__h1 = MultiProcessTimedRotatingFileHandler(config.LogPath or 'wechat.log', 'midnight')
__h1.setLevel(logging.DEBUG)
__f = logging.Formatter('%(asctime)s [%(name)s] %(levelname)s: %(message)s')
__h1.setFormatter(__f)
if config.MailNotifyEnable:
__h2 = SMTPHandler(config.MailHost, config.MailFrom, config.MailTo,\
'New Critical Event From [WeChat: TianTian]', (config.MailFrom, config.MailPass))
__h2.setLevel(logging.CRITICAL)
__h2.setFormatter(__f)
log.addHandler(__h2)
log.addHandler(__h1)
if __name__ == '__main__':
log.debug('debug message')
log.info('info message')
log.warn('warn message')
log.error('error message')
log.critical('critical message')
| gpl-2.0 | -4,613,170,647,903,772,000 | 26.860465 | 93 | 0.691987 | false |
lgunsch/django-vmail | vmail/tests/command_tests.py | 1 | 7582 | """
Test the virtual mail management commands.
"""
import sys
import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from ..models import MailUser, Domain, Alias
class BaseCommandTestCase(object):
fixtures = ['vmail_model_testdata.json']
def setUp(self):
self.syserr = sys.stderr
sys.stderr = StringIO.StringIO()
self.sysout = sys.stdout
sys.stdout = StringIO.StringIO()
def tearDown(self):
sys.stdout.close()
sys.stdout = self.sysout
sys.stderr.close()
sys.stderr = self.syserr
def assertSystemExit(self, *args, **opts):
"""
Apply the given arguments and options to the current command in
`self.cmd` and ensure that CommandError is raised. Default
        arguments `verbosity=0` and `interactive=False` are applied
if they are not provided.
"""
default_opts = {'verbosity': 0, 'interactive': False}
opts = dict(default_opts.items() + opts.items())
self.assertRaises(CommandError, call_command, self.cmd, *args, **opts)
def test_bad_arg_len(self):
"""Test that an incorrect # of positional arguments raises an error."""
self.assertSystemExit(*range(self.arglen - 1))
self.assertSystemExit(*range(self.arglen + 1))
class TestChangePassword(BaseCommandTestCase, TestCase):
cmd = 'vmail-chpasswd'
arglen = 3
def _test_change_password(self, pk_):
old_pw = 'password'
new_pw = 'new_password'
user = MailUser.objects.get(pk=pk_)
user.set_password(old_pw)
user.save()
self.assertTrue(user.check_password(old_pw))
call_command(self.cmd, str(user), old_pw, new_pw)
user = MailUser.objects.get(pk=pk_)
self.assertTrue(user.check_password(new_pw))
def test_change_password(self):
"""Validate change password works as expected."""
self._test_change_password(1)
self._test_change_password(7)
self._test_change_password(8)
def test_bad_old_password(self):
user = '[email protected]'
self.assertSystemExit(user, 'old pw', 'new pw')
def test_bad_email(self):
"""Test a proper email is required."""
self.assertSystemExit('', None, None)
self.assertSystemExit('@', None, None)
self.assertSystemExit('[email protected]', None, None)
self.assertSystemExit(' [email protected] ', None, None)
def test_bad_domain(self):
"""Test a valid domain is required."""
user = '[email protected]'
self.assertSystemExit(user, 'old pw', 'new pw')
def test_bad_mailuser(self):
"""Test a valid user is required."""
user = '[email protected]'
self.assertSystemExit(user, 'old pw', 'new pw')
class TestSetPassword(BaseCommandTestCase, TestCase):
cmd = 'vmail-setpasswd'
arglen = 2
def test_bad_email(self):
"""Test a proper email is required."""
self.assertSystemExit('', None)
self.assertSystemExit('@', None)
self.assertSystemExit('[email protected]', None)
self.assertSystemExit(' [email protected] ', None)
def test_bad_domain(self):
"""Test a valid domain is required."""
user = '[email protected]'
self.assertSystemExit(user, 'new pw')
def test_bad_mailuser(self):
"""Test a valid user is required."""
user = '[email protected]'
self.assertSystemExit(user, 'new pw')
def _test_change_password(self, pk_):
old_pw = 'password'
new_pw = 'new_password'
user = MailUser.objects.get(pk=pk_)
user.set_password(old_pw)
user.save()
self.assertTrue(user.check_password(old_pw))
call_command(self.cmd, str(user), new_pw)
user = MailUser.objects.get(pk=pk_)
self.assertTrue(user.check_password(new_pw))
def test_change_password(self):
"""Validate change password works as expected."""
self._test_change_password(1)
self._test_change_password(7)
self._test_change_password(8)
class TestAddMBoxPassword(BaseCommandTestCase, TestCase):
cmd = 'vmail-addmbox'
arglen = 1
def test_bad_email(self):
"""Test a proper email is required."""
self.assertSystemExit('')
self.assertSystemExit('@')
self.assertSystemExit('[email protected]')
self.assertSystemExit(' [email protected] ')
    def test_user_already_exists(self):
user = MailUser.objects.get(pk=1)
self.assertSystemExit(str(user))
def test_create_user(self):
domain = Domain.objects.get(pk=1)
user = 'me'
call_command(self.cmd, '{0}@{1}'.format(user, domain))
created_user = MailUser.objects.get(username=user, domain__fqdn=str(domain))
self.assertEqual(created_user.username, user)
self.assertEqual(created_user.domain, domain)
def test_create_user_domain_not_exists(self):
user = 'me'
domain = 'unknown.com'
self.assertSystemExit('{0}@{1}'.format(user, domain))
call_command(self.cmd, '{0}@{1}'.format(user, domain), create_domain=True)
created_user = MailUser.objects.get(username=user, domain__fqdn=str(domain))
self.assertEqual(created_user.username, user)
self.assertEqual(created_user.domain.fqdn, domain)
def test_create_user_with_password(self):
user = 'me'
domain = 'example.com'
password = 'my_new_password'
call_command(self.cmd, '{0}@{1}'.format(user, domain), password=password)
created_user = MailUser.objects.get(username=user, domain__fqdn=str(domain))
self.assertTrue(created_user.check_password(password))
self.assertEqual(created_user.username, user)
self.assertEqual(created_user.domain.fqdn, domain)
class TestAddAlias(BaseCommandTestCase, TestCase):
cmd = 'vmail-addalias'
arglen = 3
def test_bad_destination_email(self):
"""Test a proper email is required."""
# Only destination is required to be a valid email address
self.assertSystemExit(str(self.domain), self.source, '')
self.assertSystemExit(str(self.domain), self.source, '@')
self.assertSystemExit(str(self.domain), self.source, '[email protected]')
self.assertSystemExit(str(self.domain), self.source, ' [email protected] ')
def setUp(self):
super(TestAddAlias, self).setUp()
self.domain = Domain.objects.get(pk=1)
self.source = "[email protected]"
self.destination = "[email protected]"
def test_add_alias(self):
call_command(self.cmd, str(self.domain), self.source, self.destination)
self._assert_created()
def test_add_catchall(self):
self.source = '@example.com'
call_command(self.cmd, str(self.domain), self.source, self.destination)
self._assert_created()
def test_add_alias_domain_has_at_symbol(self):
call_command(
self.cmd, '@{0}'.format(self.domain), self.source, self.destination)
self._assert_created()
def _assert_created(self):
alias = Alias.objects.get(domain__fqdn=str(self.domain),
source=self.source,
destination=self.destination)
self.assertTrue(alias.active)
    def test_alias_exists(self):
        call_command(self.cmd, str(self.domain), self.source, self.destination)
        # Creating the same alias a second time should fail.
        self.assertSystemExit(str(self.domain), self.source, self.destination)
| mit | -8,373,753,192,189,958,000 | 33 | 88 | 0.628726 | false |
lcy-seso/Paddle | python/paddle/dataset/uci_housing.py | 1 | 3748 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators.
"""
import numpy as np
import os
import paddle.dataset.common
__all__ = ['train', 'test']
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT', 'convert'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
ax.set_title('feature scale')
plt.xticks(range(feature_num), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] / feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in xrange(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
def train():
"""
UCI_HOUSING training set creator.
    It returns a reader creator; each sample yielded by the reader is a pair of
    normalized feature values and the house price.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
def test():
"""
UCI_HOUSING test set creator.
    It returns a reader creator; each sample yielded by the reader is a pair of
    normalized feature values and the house price.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def fetch():
paddle.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
paddle.dataset.common.convert(path, test(), 1000, "uci_houseing_test")
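# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It shows how the reader creator returned by train() is consumed;
# note that train() downloads the dataset on first use. The helper name is a
# placeholder.
def _example_print_first_samples(n=3):
    reader = train()
    for i, (features, price) in enumerate(reader()):
        if i >= n:
            break
        print("features=%s price=%s" % (features, price))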
| apache-2.0 | 941,708,143,566,697,000 | 28.984 | 91 | 0.670491 | false |
obdg/plda | testdata/format.py | 1 | 1245 | #!/usr/bin/env python
import io
import sys
def parseLine(line):
l = line.strip().split()
return int(l[0]), int(l[1]), int(l[2])
def convert(dataName, f):
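    # Editor's note: reads the UCI bag-of-words files docword.<name>.txt and
    # vocab.<name>.txt and writes one line per document to ``f`` in the form
    # "word count word count ...", which appears to be the per-document input
    # format used by plda.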
with open("docword." + dataName + ".txt", 'r') as doc, open("vocab." + dataName + ".txt", 'r') as voc:
vocList = []
for w in voc:
vocList.append(w.strip())
print vocList
docs = doc.readlines()
docNum = int(docs[0])
wordNum = int(docs[1])
assert wordNum == len(vocList)
docIndex, wordIndex, count = parseLine(docs[3])
newLine = str(vocList[wordIndex-1] + " " + str(count)) + " "
for i in range(4, len(docs)):
print i
d, w, c = parseLine(docs[i])
#print docs[i]
if d == docIndex:
newLine = newLine + vocList[w-1] + " " + str(c) + " "
else :
docIndex = d
f.write(newLine + "\n")
                newLine = str(vocList[w-1] + " " + str(c)) + " "
        # Write out the last accumulated document; previously nothing flushed
        # newLine after the loop finished, so the final document was dropped.
        f.write(newLine + "\n")
if __name__ == '__main__':
if len(sys.argv) < 2:
print "no argument"
exit()
dataName = sys.argv[1]
print dataName
with open(dataName + ".txt", 'w') as of:
convert(dataName, of)
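# Example (editor's note, the dataset name is a placeholder): running
#   python format.py nips
# expects docword.nips.txt and vocab.nips.txt in the current directory and
# writes the converted corpus to nips.txt.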
| apache-2.0 | -9,169,616,368,775,824,000 | 22.942308 | 106 | 0.481124 | false |
buzztroll/staccato | staccato/openstack/common/sslutils.py | 1 | 2482 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ssl
from oslo.config import cfg
from staccato.openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
default=None,
help="CA certificate file to use to verify "
"connecting clients"),
cfg.StrOpt('cert_file',
default=None,
help="Certificate file to use when starting "
"the server securely"),
cfg.StrOpt('key_file',
default=None,
help="Private key file to use when starting "
"the server securely"),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
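# --- Illustrative usage sketch (editor's addition, not part of the original
# module). A server would typically check is_enabled() once and wrap each
# accepted connection; the listener handling below is an assumption.
def _example_serve_one(listener):
    use_ssl = is_enabled()
    conn, addr = listener.accept()
    if use_ssl:
        conn = wrap(conn)
    return conn, addr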
| apache-2.0 | -8,596,725,972,665,908,000 | 30.025 | 78 | 0.617647 | false |
SynAckPwn23/SCTF | SCTF/tests.py | 1 | 1987 | from itertools import chain, repeat
from cities_light.models import Country
from django.contrib.auth import get_user_model
from django.test import TestCase
from accounts.models import Team, UserProfile
from challenges.models import Category, Challenge, ChallengeSolved
class UserScoreTest(TestCase):
def setUp(self):
test_category = Category.objects.create(name='test')
country = Country.objects.create(name='Italy')
# create 9 users: u0, u1, ... u8
self.users = [get_user_model().objects.create_user(
'u{}'.format(i),
'u{}@test.com'.format(i),
'u1u2u3u4'
) for i in range(9)]
# create 3 teams: t0, t1, t2
self.teams = [Team.objects.create(
name='t{}'.format(i), created_by_id=i+1
) for i in range(3)]
# teams - users association: t0: (u0, u1, u2), t1: (u3, u4, u5), t2: (u6, u7, u8)
teams_users = chain.from_iterable(repeat(t, 3) for t in self.teams)
# create users profile
for u in self.users:
UserProfile.objects.create(user=u, job='job', gender='M', country=country, team=next(teams_users))
# create 9 challenges: c0, c1, ..., c8
self.challenges = [
Challenge.objects.create(name='c{}'.format(i), points=i, category=test_category)
for i in range(9)
]
# solved challenges: each user u_i solves all challenges from c_0 to c_i (ie: u2 solves c0,c1,c2)
for i, u in enumerate(self.users, 1):
for c in self.challenges[:i]:
ChallengeSolved.objects.create(user=u.profile, challenge=c)
def test_users_score(self):
for i, u in enumerate(UserProfile.objects.annotate_score().all(), 1):
self.assertEqual(u.points, sum(c.points for c in self.challenges[:i]))
    # Renamed: this was a second "test_users_score" definition that silently
    # shadowed the method above, so the points assertion never ran.
    def test_users_position(self):
for i, u in enumerate(UserProfile.objects.ordered().all(), 1):
self.assertEqual(u.position, i)
| gpl-3.0 | -6,817,270,044,321,183,000 | 35.796296 | 110 | 0.613488 | false |
bdang2012/taiga-back-casting | taiga/export_import/tasks.py | 1 | 3191 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
import sys
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.utils import timezone
from django.conf import settings
from django.utils.translation import ugettext as _
from taiga.base.mails import mail_builder
from taiga.celery import app
from .service import render_project
from .dump_service import dict_to_project
from .renderers import ExportRenderer
logger = logging.getLogger('taiga.export_import')
import resource
@app.task(bind=True)
def dump_project(self, user, project):
path = "exports/{}/{}-{}.json".format(project.pk, project.slug, self.request.id)
storage_path = default_storage.path(path)
try:
url = default_storage.url(path)
with default_storage.open(storage_path, mode="w") as outfile:
render_project(project, outfile)
except Exception:
ctx = {
"user": user,
"error_subject": _("Error generating project dump"),
"error_message": _("Error generating project dump"),
"project": project
}
email = mail_builder.export_error(user, ctx)
email.send()
logger.error('Error generating dump %s (by %s)', project.slug, user, exc_info=sys.exc_info())
return
deletion_date = timezone.now() + datetime.timedelta(seconds=settings.EXPORTS_TTL)
ctx = {
"url": url,
"project": project,
"user": user,
"deletion_date": deletion_date
}
email = mail_builder.dump_project(user, ctx)
email.send()
@app.task
def delete_project_dump(project_id, project_slug, task_id):
default_storage.delete("exports/{}/{}-{}.json".format(project_id, project_slug, task_id))
@app.task
def load_project_dump(user, dump):
try:
project = dict_to_project(dump, user.email)
except Exception:
ctx = {
"user": user,
"error_subject": _("Error loading project dump"),
"error_message": _("Error loading project dump"),
}
email = mail_builder.import_error(user, ctx)
email.send()
        # Note: ``project`` is not defined when dict_to_project() fails, so it
        # must not be referenced here.
        logger.error('Error loading project dump (by %s)', user, exc_info=sys.exc_info())
return
ctx = {"user": user, "project": project}
email = mail_builder.load_dump(user, ctx)
email.send()
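# --- Illustrative sketch (editor's addition): how a caller might enqueue a
# dump and schedule cleanup once EXPORTS_TTL has elapsed. The actual call
# sites live elsewhere in taiga; this helper and its name are assumptions.
def _example_enqueue_dump(user, project):
    result = dump_project.delay(user, project)
    delete_project_dump.apply_async(
        args=[project.pk, project.slug, result.id],
        countdown=settings.EXPORTS_TTL)
    return result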
| agpl-3.0 | -436,636,526,584,997,570 | 32.568421 | 101 | 0.67482 | false |
zqfan/leetcode | algorithms/304. Range Sum Query 2D - Immutable/solution.py | 1 | 1184 | class NumMatrix(object):
def __init__(self, matrix):
"""
:type matrix: List[List[int]]
"""
m, n = len(matrix), len(matrix[0] if matrix else [])
self._sum = [[0] * n for i in xrange(m)]
col_sum = [0] * n
for i in xrange(m):
s = 0
for j in xrange(n):
col_sum[j] += matrix[i][j]
s += col_sum[j]
self._sum[i][j] = s
def sumRegion(self, row1, col1, row2, col2):
"""
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
m, n = len(self._sum), len(self._sum[0] if self._sum else [])
if not (0 <= row1 <= row2 < m and 0 <= col1 <= col2 < n):
return 0
s = self._sum[row2][col2]
if row1 > 0:
s -= self._sum[row1-1][col2]
if col1 > 0:
s -= self._sum[row2][col1-1]
if row1 > 0 and col1 > 0:
s += self._sum[row1-1][col1-1]
return s
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
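# Worked example (editor's addition): for matrix = [[3, 0], [5, 6]] the prefix
# table self._sum becomes [[3, 3], [8, 14]], so
#   NumMatrix([[3, 0], [5, 6]]).sumRegion(0, 0, 1, 1) == 14   (whole matrix)
#   NumMatrix([[3, 0], [5, 6]]).sumRegion(1, 1, 1, 1) == 6    (bottom-right cell)
# via sum = _sum[r2][c2] - _sum[r1-1][c2] - _sum[r2][c1-1] + _sum[r1-1][c1-1].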
| gpl-3.0 | -5,383,911,250,569,336,000 | 28.6 | 69 | 0.460304 | false |
dgoodwin/wuja | test/configTests.py | 1 | 5142 | # Wuja - Google Calendar (tm) notifications for the GNOME desktop.
#
# Copyright (C) 2006 Devan Goodwin <[email protected]>
# Copyright (C) 2006 James Bowes <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Tests for the Wuja configuration object. """
__revision__ = "$Revision$"
import unittest
import gconf
import os.path
import settestpath
from wuja.config import WujaConfiguration
from wuja.config import DEFAULT_TIMESTAMP_FORMAT, DEFAULT_REMINDER,\
DEFAULT_SNOOZE
GCONF_TEST_PATH = '/apps/wuja/test'
class WujaConfigurationTests(unittest.TestCase):
def setUp(self):
self.config = WujaConfiguration(GCONF_TEST_PATH)
def tearDown(self):
# NOTE: Couldn't find a way to actually delete the directory, this just
# unsets all the properties beneath it.
client = gconf.client_get_default()
client.recursive_unset(GCONF_TEST_PATH,
gconf.UNSET_INCLUDING_SCHEMA_NAMES)
def test_add_feed_url(self):
self.config.add_feed_url('url1')
urls = self.config.get_feed_urls()
self.assertEqual(1, len(urls))
self.assertEqual('url1', urls[0])
def test_add_multiple_feed_urls(self):
urls = ['url1', 'url2', 'url3']
for url in urls:
self.config.add_feed_url(url)
result_urls = self.config.get_feed_urls()
self.assertEqual(urls, result_urls)
def test_remove_feed_url(self):
urls = ['url1', 'url2', 'url3']
for url in urls:
self.config.add_feed_url(url)
self.config.remove_feed_url(urls[1])
self.assertEqual([urls[0], urls[2]], self.config.get_feed_urls())
def test_remove_nonexistent_url(self):
urls = ['url1', 'url2', 'url3']
for url in urls:
self.config.add_feed_url(url)
self.assertRaises(ValueError, self.config.remove_feed_url,
'not a real url')
def test_remove_all_urls(self):
urls = ['url1', 'url2', 'url3']
for url in urls:
self.config.add_feed_url(url)
self.config.remove_all_feed_urls()
self.assertEqual(0, len(self.config.get_feed_urls()))
def test_basic_url_replacement(self):
""" /basic URLs lack data Wuja needs. """
urls = ['url1/basic']
for url in urls:
self.config.add_feed_url(url)
lookup_urls = self.config.get_feed_urls()
self.assertEqual('url1/full', lookup_urls[0])
    def test_ignore_empty_url(self):
        """ Add an empty string URL and ensure it doesn't get added to
the configuration.
"""
self.assertEqual(0, len(self.config.get_feed_urls()))
self.config.add_feed_url('')
self.assertEqual(0, len(self.config.get_feed_urls()))
def test_default_timestamp_format(self):
"""
If no timestamp is defined in gconf, test that a default
value is returned.
"""
self.assertEqual(DEFAULT_TIMESTAMP_FORMAT,
self.config.get_timestamp_format())
def test_set_timestamp_format(self):
client = gconf.client_get_default()
self.assertEqual(None, client.get_string(os.path.join(GCONF_TEST_PATH,
"timestamp_format")))
new_format = "%H:%M"
self.config.set_timestamp_format(new_format)
self.assertEqual(new_format, self.config.get_timestamp_format())
def test_set_reminder(self):
client = gconf.client_get_default()
self.assertEqual(None, client.get_string(os.path.join(GCONF_TEST_PATH,
"default_reminder")))
# Reminder should default to 10 if the user hasn't set one explicitly:
self.assertEqual(DEFAULT_REMINDER, self.config.get_reminder())
self.config.set_reminder(25)
self.assertEqual(25, self.config.get_reminder())
def test_set_snooze(self):
client = gconf.client_get_default()
self.assertEqual(None, client.get_string(os.path.join(GCONF_TEST_PATH,
"snooze_mins")))
# Snooze should default to 10 if the user hasn't set one explicitly:
self.assertEqual(DEFAULT_SNOOZE, self.config.get_snooze())
self.config.set_snooze(25)
self.assertEqual(25, self.config.get_snooze())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WujaConfigurationTests))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| gpl-2.0 | -486,767,916,604,198,300 | 34.462069 | 79 | 0.648969 | false |
Atihinen/ProjectUnchained | project_unchained/project_unchained/settings.py | 1 | 2005 | """
Django settings for project_unchained project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v=ujv#5cs^9k4b-x=k*(3*@8sw3r)_-kp+kc0w-y3-ibjer90-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project_unchained.urls'
WSGI_APPLICATION = 'project_unchained.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit | -2,116,393,972,507,239,200 | 23.45122 | 71 | 0.723691 | false |
googleapis/python-spanner-django | tests/unit/django_spanner/test_compiler.py | 1 | 6961 | # Copyright 2021 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
from django.core.exceptions import EmptyResultSet
from django.db.utils import DatabaseError
from django_spanner.compiler import SQLCompiler
from django.db.models.query import QuerySet
from tests.unit.django_spanner.simple_test import SpannerSimpleTestClass
from .models import Number
class TestCompiler(SpannerSimpleTestClass):
def test_unsupported_ordering_slicing_raises_db_error(self):
"""
Tries limit/offset and order by in subqueries which are not supported
by spanner.
"""
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesRegex(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesRegex(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
def test_get_combinator_sql_all_union_sql_generated(self):
"""
Tries union sql generator.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs4 = qs1.union(qs2)
compiler = SQLCompiler(qs4.query, self.connection, "default")
sql_compiled, params = compiler.get_combinator_sql("union", True)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s UNION ALL SELECT tests_number.num "
+ "FROM tests_number WHERE tests_number.num >= %s"
],
)
self.assertEqual(params, [1, 8])
def test_get_combinator_sql_distinct_union_sql_generated(self):
"""
Tries union sql generator with distinct.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs4 = qs1.union(qs2)
compiler = SQLCompiler(qs4.query, self.connection, "default")
sql_compiled, params = compiler.get_combinator_sql("union", False)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s UNION DISTINCT SELECT "
+ "tests_number.num FROM tests_number WHERE "
+ "tests_number.num >= %s"
],
)
self.assertEqual(params, [1, 8])
def test_get_combinator_sql_difference_all_sql_generated(self):
"""
Tries difference sql generator.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs4 = qs1.difference(qs2)
compiler = SQLCompiler(qs4.query, self.connection, "default")
sql_compiled, params = compiler.get_combinator_sql("difference", True)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s EXCEPT ALL SELECT tests_number.num "
+ "FROM tests_number WHERE tests_number.num >= %s"
],
)
self.assertEqual(params, [1, 8])
def test_get_combinator_sql_difference_distinct_sql_generated(self):
"""
Tries difference sql generator with distinct.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs4 = qs1.difference(qs2)
compiler = SQLCompiler(qs4.query, self.connection, "default")
sql_compiled, params = compiler.get_combinator_sql("difference", False)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s EXCEPT DISTINCT SELECT "
+ "tests_number.num FROM tests_number WHERE "
+ "tests_number.num >= %s"
],
)
self.assertEqual(params, [1, 8])
def test_get_combinator_sql_union_and_difference_query_together(self):
"""
Tries sql generator with union of queryset with queryset of difference.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs3 = Number.objects.filter(num__exact=10).values("num")
qs4 = qs1.union(qs2.difference(qs3))
compiler = SQLCompiler(qs4.query, self.connection, "default")
sql_compiled, params = compiler.get_combinator_sql("union", False)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s UNION DISTINCT SELECT * FROM ("
+ "SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num >= %s EXCEPT DISTINCT "
+ "SELECT tests_number.num FROM tests_number "
+ "WHERE tests_number.num = %s)"
],
)
self.assertEqual(params, [1, 8, 10])
def test_get_combinator_sql_parentheses_in_compound_not_supported(self):
"""
Tries sql generator with union of queryset with queryset of difference,
adding support for parentheses in compound sql statement.
"""
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=8).values("num")
qs3 = Number.objects.filter(num__exact=10).values("num")
qs4 = qs1.union(qs2.difference(qs3))
compiler = SQLCompiler(qs4.query, self.connection, "default")
compiler.connection.features.supports_parentheses_in_compound = False
sql_compiled, params = compiler.get_combinator_sql("union", False)
self.assertEqual(
sql_compiled,
[
"SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num <= %s UNION DISTINCT SELECT * FROM ("
+ "SELECT tests_number.num FROM tests_number WHERE "
+ "tests_number.num >= %s EXCEPT DISTINCT "
+ "SELECT tests_number.num FROM tests_number "
+ "WHERE tests_number.num = %s)"
],
)
self.assertEqual(params, [1, 8, 10])
def test_get_combinator_sql_empty_queryset_raises_exception(self):
"""
Tries sql generator with empty queryset.
"""
compiler = SQLCompiler(QuerySet().query, self.connection, "default")
with self.assertRaises(EmptyResultSet):
compiler.get_combinator_sql("union", False)
| bsd-3-clause | 754,637,484,480,902,400 | 39.236994 | 79 | 0.596322 | false |
Proactuary/ChessLearner | sunfish.py | 1 | 19713 | #!/usr/bin/env pypy
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import random
from itertools import count
from collections import Counter, OrderedDict, namedtuple
ARG_OUTPUTFILE = 2
GAMES_PLAYED = 10
# The table size is the maximum number of elements in the transposition table.
TABLE_SIZE = 1e6
# This constant controls how much time we spend on looking for optimal moves.
NODES_SEARCHED = 1e4
# Mate value must be greater than 8*queen + 2*(rook+knight+bishop)
# King value is set to twice this value such that if the opponent is
# 8 queens up, but we got the king, we still exceed MATE_VALUE.
MATE_VALUE = 30000
# Our board is represented as a 120 character string. The padding allows for
# fast detection of moves that don't stay within the board.
A1, H1, A8, H8 = 91, 98, 21, 28
initial = (
' \n' # 0 - 9
' \n' # 10 - 19
' rnbqkbnr\n' # 20 - 29
' pppppppp\n' # 30 - 39
' ........\n' # 40 - 49
' ........\n' # 50 - 59
' ........\n' # 60 - 69
' ........\n' # 70 - 79
' PPPPPPPP\n' # 80 - 89
' RNBQKBNR\n' # 90 - 99
' \n' # 100 -109
' ' # 110 -119
)
###############################################################################
# Move and evaluation tables
###############################################################################
N, E, S, W = -10, 1, 10, -1
directions = {
'P': (N, 2*N, N+W, N+E),
'N': (2*N+E, N+2*E, S+2*E, 2*S+E, 2*S+W, S+2*W, N+2*W, 2*N+W),
'B': (N+E, S+E, S+W, N+W),
'R': (N, E, S, W),
'Q': (N, E, S, W, N+E, S+E, S+W, N+W),
'K': (N, E, S, W, N+E, S+E, S+W, N+W)
}
pst = {
'P': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 198, 198, 198, 198, 198, 198, 198, 198, 0,
0, 178, 198, 198, 198, 198, 198, 198, 178, 0,
0, 178, 198, 198, 198, 198, 198, 198, 178, 0,
0, 178, 198, 208, 218, 218, 208, 198, 178, 0,
0, 178, 198, 218, 238, 238, 218, 198, 178, 0,
0, 178, 198, 208, 218, 218, 208, 198, 178, 0,
0, 178, 198, 198, 198, 198, 198, 198, 178, 0,
0, 198, 198, 198, 198, 198, 198, 198, 198, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'B': (
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 797, 824, 817, 808, 808, 817, 824, 797, 0,
0, 814, 841, 834, 825, 825, 834, 841, 814, 0,
0, 818, 845, 838, 829, 829, 838, 845, 818, 0,
0, 824, 851, 844, 835, 835, 844, 851, 824, 0,
0, 827, 854, 847, 838, 838, 847, 854, 827, 0,
0, 826, 853, 846, 837, 837, 846, 853, 826, 0,
0, 817, 844, 837, 828, 828, 837, 844, 817, 0,
0, 792, 819, 812, 803, 803, 812, 819, 792, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'N': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 627, 762, 786, 798, 798, 786, 762, 627, 0,
0, 763, 798, 822, 834, 834, 822, 798, 763, 0,
0, 817, 852, 876, 888, 888, 876, 852, 817, 0,
0, 797, 832, 856, 868, 868, 856, 832, 797, 0,
0, 799, 834, 858, 870, 870, 858, 834, 799, 0,
0, 758, 793, 817, 829, 829, 817, 793, 758, 0,
0, 739, 774, 798, 810, 810, 798, 774, 739, 0,
0, 683, 718, 742, 754, 754, 742, 718, 683, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'R': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 1258, 1263, 1268, 1272, 1272, 1268, 1263, 1258, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'Q': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 2529, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'K': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 60098, 60132, 60073, 60025, 60025, 60073, 60132, 60098, 0,
0, 60119, 60153, 60094, 60046, 60046, 60094, 60153, 60119, 0,
0, 60146, 60180, 60121, 60073, 60073, 60121, 60180, 60146, 0,
0, 60173, 60207, 60148, 60100, 60100, 60148, 60207, 60173, 0,
0, 60196, 60230, 60171, 60123, 60123, 60171, 60230, 60196, 0,
0, 60224, 60258, 60199, 60151, 60151, 60199, 60258, 60224, 0,
0, 60287, 60321, 60262, 60214, 60214, 60262, 60321, 60287, 0,
0, 60298, 60332, 60273, 60225, 60225, 60273, 60332, 60298, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
}
###############################################################################
# Chess logic
###############################################################################
class Position(namedtuple('Position', 'board score wc bc ep kp rt')):
""" A state of a chess game
board -- a 120 char representation of the board
score -- the board evaluation
wc -- the castling rights
bc -- the opponent castling rights
ep - the en passant square
kp - the king passant square
rt - is the board rotated (added by ED)
"""
def genMoves(self):
# For each of our pieces, iterate through each possible 'ray' of moves,
# as defined in the 'directions' map. The rays are broken e.g. by
# captures or immediately in case of pieces such as knights.
for i, p in enumerate(self.board):
if not p.isupper(): continue
for d in directions[p]:
for j in count(i+d, d):
q = self.board[j]
# Stay inside the board
if self.board[j].isspace(): break
# Castling
if i == A1 and q == 'K' and self.wc[0]: yield (j, j-2)
if i == H1 and q == 'K' and self.wc[1]: yield (j, j+2)
# No friendly captures
if q.isupper(): break
# Special pawn stuff
if p == 'P' and d in (N+W, N+E) and q == '.' and j not in (self.ep, self.kp): break
if p == 'P' and d in (N, 2*N) and q != '.': break
if p == 'P' and d == 2*N and (i < A1+N or self.board[i+N] != '.'): break
# Move it
yield (i, j)
# Stop crawlers from sliding
if p in ('P', 'N', 'K'): break
# No sliding after captures
if q.islower(): break
def rotate(self):
return Position(
self.board[::-1].swapcase(), -self.score,
self.bc, self.wc, 119-self.ep, 119-self.kp, not(self.rt))
def move(self, move):
i, j = move
p, q = self.board[i], self.board[j]
put = lambda board, i, p: board[:i] + p + board[i+1:]
# Copy variables and reset ep and kp
board = self.board
wc, bc, ep, kp = self.wc, self.bc, 0, 0
score = self.score + self.value(move)
# Actual move
board = put(board, j, board[i])
board = put(board, i, '.')
# Castling rights
if i == A1: wc = (False, wc[1])
if i == H1: wc = (wc[0], False)
if j == A8: bc = (bc[0], False)
if j == H8: bc = (False, bc[1])
# Castling
if p == 'K':
wc = (False, False)
if abs(j-i) == 2:
kp = (i+j)//2
board = put(board, A1 if j < i else H1, '.')
board = put(board, kp, 'R')
# Special pawn stuff
if p == 'P':
if A8 <= j <= H8:
board = put(board, j, 'Q')
if j - i == 2*N:
ep = i + N
if j - i in (N+W, N+E) and q == '.':
board = put(board, j+S, '.')
# We rotate the returned position, so it's ready for the next player
return Position(board, score, wc, bc, ep, kp, self.rt).rotate()
def value(self, move):
i, j = move
p, q = self.board[i], self.board[j]
# Actual move
score = pst[p][j] - pst[p][i]
# Capture
if q.islower():
score += pst[q.upper()][j]
# Castling check detection
if abs(j-self.kp) < 2:
score += pst['K'][j]
# Castling
if p == 'K' and abs(i-j) == 2:
score += pst['R'][(i+j)//2]
score -= pst['R'][A1 if j < i else H1]
# Special pawn stuff
if p == 'P':
if A8 <= j <= H8:
score += pst['Q'][j] - pst['P'][j]
if j == self.ep:
score += pst['P'][j+S]
return score
# Prints the string representing the board to a text file. Added by Evan Dyke
# on 12-7-14
#
def printBoard(self, filename, mode):
f = open(filename, mode)
if(self.rt):
f.write((self.rotate().board[A8:(H1+1)].replace(" ","")).replace("\n","") + "\n")
else:
f.write((self.board[A8:(H1+1)].replace(" ","")).replace("\n","") + "\n")
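# Editor's illustration (not part of the original code): the starting position
# is constructed from the module-level 'initial' board string, e.g.
#   pos = Position(initial, 0, (True, True), (True, True), 0, 0, False)
# which is exactly what playAgainstComputer() and randomMoves() do below.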
Entry = namedtuple('Entry', 'depth score gamma move')
tp = OrderedDict()
###############################################################################
# Search logic
###############################################################################
nodes = 0
def bound(pos, gamma, depth):
""" returns s(pos) <= r < gamma if s(pos) < gamma
returns s(pos) >= r >= gamma if s(pos) >= gamma """
global nodes; nodes += 1
# Look in the table if we have already searched this position before.
# We use the table value if it was done with at least as deep a search
# as ours, and the gamma value is compatible.
entry = tp.get(pos)
if entry is not None and entry.depth >= depth and (
entry.score < entry.gamma and entry.score < gamma or
entry.score >= entry.gamma and entry.score >= gamma):
return entry.score
# Stop searching if we have won/lost.
if abs(pos.score) >= MATE_VALUE:
return pos.score
# Null move. Is also used for stalemate checking
nullscore = -bound(pos.rotate(), 1-gamma, depth-3) if depth > 0 else pos.score
#nullscore = -MATE_VALUE*3 if depth > 0 else pos.score
if nullscore >= gamma:
return nullscore
# We generate all possible, pseudo legal moves and order them to provoke
# cuts. At the next level of the tree we are going to minimize the score.
# This can be shown equal to maximizing the negative score, with a slightly
# adjusted gamma value.
best, bmove = -3*MATE_VALUE, None
for move in sorted(pos.genMoves(), key=pos.value, reverse=True):
# We check captures with the value function, as it also contains ep and kp
if depth <= 0 and pos.value(move) < 150:
break
score = -bound(pos.move(move), 1-gamma, depth-1)
if score > best:
best = score
bmove = move
if score >= gamma:
break
# If there are no captures, or just not any good ones, stand pat
if depth <= 0 and best < nullscore:
return nullscore
# Check for stalemate. If best move loses king, but not doing anything
# would save us. Not at all a perfect check.
    if depth > 0 and best <= -MATE_VALUE and nullscore > -MATE_VALUE:
best = 0
# We save the found move together with the score, so we can retrieve it in
# the play loop. We also trim the transposition table in FILO order.
# We prefer fail-high moves, as they are the ones we can build our pv from.
if entry is None or depth >= entry.depth and best >= gamma:
tp[pos] = Entry(depth, best, gamma, bmove)
if len(tp) > TABLE_SIZE:
tp.pop()
return best
def search(pos, maxn=NODES_SEARCHED):
""" Iterative deepening MTD-bi search """
global nodes; nodes = 0
# We limit the depth to some constant, so we don't get a stack overflow in
# the end game.
for depth in range(1, 99):
# The inner loop is a binary search on the score of the position.
# Inv: lower <= score <= upper
# However this may be broken by values from the transposition table,
# as they don't have the same concept of p(score). Hence we just use
# 'lower < upper - margin' as the loop condition.
lower, upper = -3*MATE_VALUE, 3*MATE_VALUE
while lower < upper - 3:
gamma = (lower+upper+1)//2
score = bound(pos, gamma, depth)
if score >= gamma:
lower = score
if score < gamma:
upper = score
# print("Searched %d nodes. Depth %d. Score %d(%d/%d)" % (nodes, depth, score, lower, upper))
# We stop deepening if the global N counter shows we have spent too
# long, or if we have already won the game.
if nodes >= maxn or abs(score) >= MATE_VALUE:
break
# If the game hasn't finished we can retrieve our move from the
# transposition table.
entry = tp.get(pos)
if entry is not None:
return entry.move, score
return None, score
###############################################################################
# User interface
###############################################################################
# Python 2 compatability
if sys.version_info[0] == 2:
input = raw_input
def parse(c):
fil, rank = ord(c[0]) - ord('a'), int(c[1]) - 1
return A1 + fil - 10*rank
def render(i):
rank, fil = divmod(i - A1, 10)
return chr(fil + ord('a')) + str(-rank + 1)
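# Editor's illustration (not part of the original sunfish code): parse() maps
# algebraic squares to indices in the 120-char board string and render() is
# its inverse for on-board squares.
assert parse('a1') == A1 and parse('h8') == H8
assert render(parse('e2')) == 'e2'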
def main():
    # Check the mode given on the command line: 'c' plays against the computer, 'r' generates random games
if(sys.argv[1] == 'c'):
playAgainstComputer()
elif(sys.argv[1] == 'r'):
for count in range(1, GAMES_PLAYED+1):
randomMoves("game" + str(count) + ".txt")
def playAgainstComputer():
pos = Position(initial, 0, (True,True), (True,True), 0, 0, False)
# Modified code to print board to file if optional argument added
# Modified by Evan Dyke on 12-7-14
if(len(sys.argv) > ARG_OUTPUTFILE):
pos.printBoard(sys.argv[ARG_OUTPUTFILE], 'w')
while True:
# We add some spaces to the board before we print it.
# That makes it more readable and pleasing.
print(' '.join(pos.board))
# We query the user until she enters a legal move.
move = None
while move not in pos.genMoves():
crdn = input("Your move: ")
try:
move = parse(crdn[0:2]), parse(crdn[2:4])
except ValueError:
# Inform the user when invalid input (e.g. "help") is entered
print("Invalid input. Please enter a move in the proper format (e.g. g8f6)")
pos = pos.move(move)
# Code added by ED to print board to a text file
if(len(sys.argv) > ARG_OUTPUTFILE):
pos.printBoard(sys.argv[ARG_OUTPUTFILE], 'a')
# After our move we rotate the board and print it again.
# This allows us to see the effect of our move.
print(' '.join(pos.rotate().board))
# Fire up the engine to look for a move.
move, score = search(pos)
if score <= -MATE_VALUE:
print("You won")
# Modified code to print board to file if optional argument added
# Modified by Evan Dyke on 12-13-14
if(len(sys.argv) > ARG_OUTPUTFILE):
pos = pos.move(move)
pos.printBoard(sys.argv[ARG_OUTPUTFILE],'a')
                f = open(sys.argv[ARG_OUTPUTFILE],'a')
f.write("1")
break
if score >= MATE_VALUE:
print("You lost")
# Modified code to print board to file if optional argument added
# Modified by Evan Dyke on 12-13-14
if(len(sys.argv) > ARG_OUTPUTFILE):
pos = pos.move(move)
pos.printBoard(sys.argv[ARG_OUTPUTFILE],'a')
f = open(sys.argv[ARG_OUTPUTFILE],'a')
f.write("2")
break
# The black player moves from a rotated position, so we have to
# 'back rotate' the move before printing it.
print("My move:", render(119-move[0]) + render(119-move[1]))
pos = pos.move(move)
# Modified code to print board to file if optional argument added
# Modified by Evan Dyke on 12-7-14
if(len(sys.argv) > ARG_OUTPUTFILE):
pos.printBoard(sys.argv[ARG_OUTPUTFILE], 'a')
# This function generates a game of chess by randomly moving pieces on both sides
# until it is won by one of the players. The state of the game is written to a text
# file.
#
# Modified Sunfish code by ED on 12-18-14
# Parameters:
# outputFilename filename to write the game out to
#
def randomMoves(outputFilename):
pos = Position(initial, 0, (True,True), (True,True), 0, 0, False)
# Modified code to print board to file if optional argument added
# Modified by Evan Dyke on 12-7-14
pos.printBoard(outputFilename, 'w')
while True:
# We add some spaces to the board before we print it.
# That makes it more readable and pleasing.
print(' '.join(pos.board))
        # Randomly generate a move for Player 1 if the game is still in session
if(abs(pos.score) < MATE_VALUE):
            # Player 1 move
move = random.choice(list(pos.genMoves()))
pos = pos.move(move)
# Code added by ED to print board to a text file
pos.printBoard(outputFilename, 'a')
# After our move we rotate the board and print it again.
# This allows us to see the effect of our move.
#print(' '.join(pos.rotate().board))
        # Randomly generate a move for Player 2 if the game is still in session
if(abs(pos.score) < MATE_VALUE):
move = random.choice(list(pos.genMoves()))
pos = pos.move(move)
# Code added by ED to print board to a text file
pos.printBoard(outputFilename, 'a')
if(pos.rt):
pos = pos.rotate()
if pos.score >= MATE_VALUE:
print("You won")
# Modified code to print board to file
f = open(outputFilename,'a')
f.write("1")
break
if pos.score <= -MATE_VALUE:
print("You lost")
            # Modified code to print board to file
f = open(outputFilename,'a')
f.write("2")
break
if __name__ == '__main__':
main()
| gpl-3.0 | -5,456,293,336,961,989,000 | 37.203488 | 103 | 0.505504 | false |
subodhchhabra/glances | glances/globals.py | 1 | 1502 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Common objects shared by all Glances modules."""
import os
import sys
# Operating system flag
# Note: Somes libs depends of OS
BSD = sys.platform.find('bsd') != -1
LINUX = sys.platform.startswith('linux')
OSX = sys.platform.startswith('darwin')
WINDOWS = sys.platform.startswith('win')
# Path definitions
work_path = os.path.realpath(os.path.dirname(__file__))
appname_path = os.path.split(sys.argv[0])[0]
sys_prefix = os.path.realpath(os.path.dirname(appname_path))
# Set the plugins and export modules path
plugins_path = os.path.realpath(os.path.join(work_path, 'plugins'))
exports_path = os.path.realpath(os.path.join(work_path, 'exports'))
sys_path = sys.path[:]
sys.path.insert(1, exports_path)
sys.path.insert(1, plugins_path)
| lgpl-3.0 | -4,663,706,417,952,134,000 | 34.761905 | 77 | 0.740346 | false |
macleinn/heimdall | uploader/drive_uploader.py | 1 | 6245 | import logging
import os
import time
from configparser import ConfigParser
import httplib2
from apiclient import discovery
from apiclient.http import MediaFileUpload
from oauth2client import client
from oauth2client.file import Storage
try:
import argparse
media_types = ["photo", "video"]
parser = argparse.ArgumentParser(description='Uploader')
parser.add_argument('-c', '--config', help='Configuration file for the program', required=True)
parser.add_argument('-f', '--files', help='Files to be uploaded', type=str, required=True)
parser.add_argument('-t', '--type', help='Type (photo or video)', choices=media_types, required=True)
args = vars(parser.parse_args())
except ImportError:
args = None
LOG_FORMAT = '%(asctime)s %(message)s'
SCOPES = 'https://www.googleapis.com/auth/drive'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
GOOGLE_FOLDER_MIME_TYPE = 'application/vnd.google-apps.folder'
CLIENT_SECRET_FILE = 'client_secret.json'
TOKEN_FILE = 'uploader_token.json'
APPLICATION_NAME = 'uploader'
def get_config(config_file_path):
"""Gets configuration.
Returns:
Configurations
"""
app_config = ConfigParser()
app_config.read(config_file_path)
config = dict()
config['credentials_dir'] = app_config['local']['credentials']
config['log_dir'] = app_config['local']['log']
config['photo_dir'] = app_config['drive']['photo']
config['video_dir'] = app_config['drive']['video']
return config
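# Editor's sketch of the configuration file layout assumed by get_config()
# (the section and key names follow the lookups above; the values are
# hypothetical):
#
#   [local]
#   credentials = /home/pi/.credentials
#   log = /home/pi/logs
#
#   [drive]
#   photo = Photos
#   video = Videos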
def get_credentials(config):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
credential_dir = config.get("credentials_dir", None)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
token_path = os.path.join(credential_dir, TOKEN_FILE)
store = Storage(token_path)
credentials = store.get()
if not credentials or credentials.invalid:
credentials_path = os.path.join(credential_dir, CLIENT_SECRET_FILE)
flow = client.flow_from_clientsecrets(credentials_path, SCOPES, REDIRECT_URI)
flow.user_agent = APPLICATION_NAME
auth_uri = flow.step1_get_authorize_url()
print('Go to this link in your browser:')
print(auth_uri)
auth_code = input('Enter the auth code: ')
credentials = flow.step2_exchange(auth_code)
store.put(credentials)
logging.info('Storing credentials to ' + token_path)
return credentials
def get_folder(service, drive_folder_name, file_folder_name):
"""Find and return the id of the folder given the name.
Returns:
Folder ID
"""
files = service.files().list(
q="name='%s' and mimeType contains '%s' and trashed=false" % (
drive_folder_name, GOOGLE_FOLDER_MIME_TYPE)).execute()
if len(files.get('files')) == 1:
parent_id = files.get('files')[0].get('id')
else:
logging.info('Could not find the folder: %s', drive_folder_name)
file_metadata = {'name': drive_folder_name, 'mimeType': GOOGLE_FOLDER_MIME_TYPE}
parent_folder = service.files().create(body=file_metadata).execute()
parent_id = parent_folder.get('id')
logging.info('Created new folder: %s', drive_folder_name)
fileQuery = "name='%s' and '%s' in parents and mimeType contains '%s' and trashed=false" % (
file_folder_name, parent_id, GOOGLE_FOLDER_MIME_TYPE)
files = service.files().list(q=fileQuery).execute()
if len(files.get('files')) == 1:
folder_id = files.get('files')[0].get('id')
else:
logging.info('Could not find the folder: %s', file_folder_name)
file_metadata = {'name': file_folder_name, 'parents': [parent_id], 'mimeType': GOOGLE_FOLDER_MIME_TYPE}
file_folder = service.files().create(body=file_metadata).execute()
folder_id = file_folder.get('id')
logging.info('Created new folder: %s', file_folder_name)
return folder_id
def breakdown_files(files):
"""Breakdown into a list of files
"""
breakdown_list = []
for f in files:
f_path = os.path.abspath(f)
if os.path.exists(f_path):
if os.path.isdir(f_path):
children = [os.path.join(f_path, child) for child in os.listdir(f_path)]
breakdown_list.extend(children)
else:
breakdown_list.append(f_path)
return breakdown_list
def upload_files(config, service, media_type, files):
"""Upload given files to Google Drive
"""
if media_type == "video":
root = config.get('video_dir')
file_type = 'video/avi'
else:
root = config.get('photo_dir')
file_type = 'image/jpeg'
for file in files:
try:
file_dir = time.strftime('%Y-%m-%d', time.localtime(os.path.getmtime(file)))
folder_id = get_folder(service, root, file_dir)
file_name = os.path.basename(file)
media = MediaFileUpload(file, mimetype=file_type)
file_metadata = {'name': file_name, 'parents': [folder_id]}
service.files().create(body=file_metadata, media_body=media, fields='id').execute()
os.remove(file)
except OSError:
logging.error('Failed to upload: %s', os.path.abspath(file))
def main():
"""Creates a Google Drive API service object and uploads files
"""
config = get_config(args.get('config'))
file_list = [item.strip() for item in args.get('files').split(',')]
media_type = args.get('type')
log_file_path = os.path.join(config.get("log_dir"), 'uploader.log')
logging.basicConfig(filename=log_file_path, format=LOG_FORMAT, level=logging.INFO)
credentials = get_credentials(config)
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v3', http=http, cache_discovery=False)
breakdown_list = breakdown_files(file_list)
if breakdown_list:
upload_files(config, service, media_type, breakdown_list)
else:
logging.info('No files found.')
if __name__ == '__main__':
main()
| gpl-3.0 | -1,443,827,264,816,349,700 | 34.282486 | 111 | 0.643555 | false |
fusioneng/gif2html5-app | tests/test_server.py | 1 | 4198 | import server
import unittest
import requests
from unittest.mock import MagicMock, ANY, patch
from flask import json
from gif2html5.config_parser import get_config
from gif2html5.s3_manager import S3Manager
from tests.test_context import TestContext
class JsonPayloadAttachmentIdMatcher(object):
def __init__(self, o):
self.o = o
def __eq__(self, o):
return o['attachment_id'] == '123'
class FlaskTestCase(TestContext):
def setUp(self):
server.app.config['TESTING'] = True
server.app.config['API_KEY'] = '123456'
self.app = server.app.test_client()
def test_authentication(self):
payload = {'url': 'http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif'}
response = self.app.post('/convert', data=json.dumps(payload), follow_redirects=True)
self.assertEqual(response.status_code, 401)
def test_getting_mp4(self):
payload = {'url': 'http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif',
'api_key': '123456'}
response = self.app.post('/convert', data=json.dumps(payload), follow_redirects=True)
self.assertEqual(response.status_code, 200)
data = json.loads(response.data)
self.assertRegex(data['mp4'], '\.mp4')
self.assertRegex(data['ogv'], '\.ogv')
self.assertRegex(data['webm'], '\.webm')
self.assertRegex(data['snapshot'], '\.jpg')
file_to_delete = data['mp4'].split('/')[-1]
s3Manager = S3Manager(get_config())
s3Manager.delete(file_to_delete)
def test_malformed_json_request(self):
payload = '{"url":"http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif" "webhook":"http://google.com" }'
response = self.app.post('/convert', data=payload, follow_redirects=True)
self.assertEqual(response.status_code, 406)
def test_getting_mp4_without_payload(self):
response = self.app.post('/convert', follow_redirects=True)
self.assertEqual(response.status_code, 406)
def test_webhook(self):
server.convert_video.delay = MagicMock()
payload = {'api_key': '123456',
'url': 'http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif',
'webhook': 'http://www.google.com'}
response = self.app.post('/convert', data=json.dumps(payload), follow_redirects=True)
self.assertEqual(response.status_code, 200)
server.convert_video.delay.assert_called_with(ANY, 'http://www.google.com')
@patch('gif2html5.video_manager.convert')
@patch('requests.post')
def test_retry_failed_task(self, mock_video_manager, mock_requests):
mock_video_manager.return_value = {'webm': 'file.webm', 'mp4': 'file.mp4', 'ogv': 'file.ogv', 'snapshot': 'snapshot.png'}
error = Exception('some error')
mock_video_manager.side_effect = error
server.convert_video.retry = MagicMock()
server.convert_video('http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif', 'http://www.company.com/webhook?attachment_id=1234')
server.convert_video.retry.assert_called_with(exc=error)
@patch('gif2html5.video_manager.convert')
@patch('requests.post')
def test_video_converter_task(self, mock_video_manager, mock_requests):
mock_video_manager.return_value = {'webm': 'file.webm', 'mp4': 'file.mp4', 'ogv': 'file.ogv', 'snapshot': 'snapshot.png'}
server.upload_resources = MagicMock(return_value={})
server.convert_video.apply(args=('http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif', 'http://www.google.com?attachment_id=123')).get()
payload = {'attachment_id': '123'}
requests.post.assert_called_with('http://www.google.com?attachment_id=123', data=JsonPayloadAttachmentIdMatcher(payload))
@patch('requests.post')
def test_video_converter_task_without_attachment_id(self, mock_requests):
server.convert_video.apply(args=('http://media.giphy.com/media/WSqcqvTxgwfYs/giphy.gif', 'http://www.google.com')).get()
requests.post.assert_called_with('http://www.google.com', data={'message': 'It looks like you are missing attachment_id'})
if __name__ == '__main__':
unittest.main()
| mit | -7,445,330,195,105,273,000 | 40.564356 | 146 | 0.656503 | false |
pmaxit/DJANGO_IOT | django_iot/settings/common.py | 1 | 5065 | """
Django settings for Django for IoT project on Heroku. For more info, see:
https://github.com/aschn/cookiecutter-django-iot
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "PASS"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Application definition
SITE_ID = 1
INSTALLED_APPS = (
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# local apps
'django_celery_results',
'widget_tweaks',
'django_iot.apps.devices',
'django_iot.apps.observations',
'django_iot.apps.interactions',
'bootstrapform',
'clear_cache',
'webpack_loader'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
ROOT_URLCONF = 'django_iot.urls'
TEMPLATES = (
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
)
WSGI_APPLICATION = 'django_iot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'assets/bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
AUTH_PASSWORD_VALIDATORS = (
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
)
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'static'),
]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
########## CELERY CONFIGURATION
# recommended settings: https://www.cloudamqp.com/docs/celery.html
BROKER_POOL_LIMIT = 10 # Will decrease connection usage
BROKER_HEARTBEAT = None # We're using TCP keep-alive instead
BROKER_CONNECTION_TIMEOUT = 30 # May require a long timeout due to Linux DNS timeouts etc
CELERY_RESULT_BACKEND = 'django-db' # AMQP is not recommended as result backend as it creates thousands of queues
CELERY_SEND_EVENTS = False # Will not create celeryev.* queues
CELERY_EVENT_QUEUE_EXPIRES = 60 # Will delete all celeryev. queues without consumers after 1 minute.
BROKER_URL = os.environ.get('CLOUDAMQP_URL', 'amqp://puneet:[email protected]/myhost')
########## END CELERY CONFIGURATION
| mit | -8,899,186,612,826,330,000 | 29.14881 | 114 | 0.697137 | false |
our-city-app/oca-backend | src/solutions/common/to/payments.py | 1 | 1721 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import unicode_property, bool_property, long_property
from rogerthat.to import TO
class PaymentSettingsTO(TO):
optional = bool_property('optional')
@classmethod
def from_model(cls, model):
return cls(optional=model.payment_optional)
class TransactionDetailsTO(TO):
id = unicode_property('id')
timestamp = long_property('timestamp')
currency = unicode_property('currency')
amount = long_property('amount')
amount_str = unicode_property('amount_str')
precision = long_property('precision')
status = unicode_property('status')
def get_display_amount(self):
return float(self.amount) / pow(10, self.precision)
@classmethod
def from_model(cls, model):
return cls(id=model.id,
timestamp=model.timestamp,
currency=model.currency,
amount=model.amount,
amount_str=u'%.2f' % (float(model.amount) / pow(10, model.precision)),
precision=model.precision,
status=model.status)
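# Worked example (editor's illustration; the numbers are hypothetical): with
# amount=1250 and precision=2, get_display_amount() returns 1250 / 10**2 = 12.5,
# and amount_str would be rendered as u'12.50' by from_model().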
| apache-2.0 | -8,937,112,238,007,400,000 | 33.42 | 89 | 0.674608 | false |
schleichdi2/OPENNFR-6.1-CORE | opennfr-openembedded-core/meta/lib/oe/package_manager.py | 1 | 65050 | from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import multiprocessing
import re
import collections
import bb
import tempfile
import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
try:
bb.note("Executing '%s' ..." % index_cmd)
result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
except subprocess.CalledProcessError as e:
return("Index creation command '%s' failed with return code %d:\n%s" %
(e.cmd, e.returncode, e.output.decode("utf-8")))
if result:
bb.note(result)
return None
"""
This method parses the output from the package manager and returns
a dictionary with the information of the packages. This is used
when the packages are in deb or ipk format.
"""
def opkg_query(cmd_output):
verregex = re.compile(' \([=<>]* [^ )]*\)')
output = dict()
pkg = ""
arch = ""
ver = ""
filename = ""
dep = []
pkgarch = ""
for line in cmd_output.splitlines():
line = line.rstrip()
if ':' in line:
if line.startswith("Package: "):
pkg = line.split(": ")[1]
elif line.startswith("Architecture: "):
arch = line.split(": ")[1]
elif line.startswith("Version: "):
ver = line.split(": ")[1]
elif line.startswith("File: ") or line.startswith("Filename:"):
filename = line.split(": ")[1]
if "/" in filename:
filename = os.path.basename(filename)
elif line.startswith("Depends: "):
depends = verregex.sub('', line.split(": ")[1])
for depend in depends.split(", "):
dep.append(depend)
elif line.startswith("Recommends: "):
recommends = verregex.sub('', line.split(": ")[1])
for recommend in recommends.split(", "):
dep.append("%s [REC]" % recommend)
elif line.startswith("PackageArch: "):
pkgarch = line.split(": ")[1]
# When there is a blank line save the package information
elif not line:
# IPK doesn't include the filename
if not filename:
filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
if pkg:
output[pkg] = {"arch":arch, "ver":ver,
"filename":filename, "deps": dep, "pkgarch":pkgarch }
pkg = ""
arch = ""
ver = ""
filename = ""
dep = []
pkgarch = ""
if pkg:
if not filename:
filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
output[pkg] = {"arch":arch, "ver":ver,
"filename":filename, "deps": dep }
return output
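# Editor's illustration (not part of the original OE-core API): a minimal,
# hypothetical sketch of the dict shape returned by opkg_query().
def _opkg_query_example():
    sample = ("Package: busybox\n"
              "Architecture: core2-64\n"
              "Version: 1.24.1-r0\n"
              "Depends: libc6 (>= 2.25)\n"
              "\n")
    # Returns {'busybox': {'arch': 'core2-64', 'ver': '1.24.1-r0',
    #                      'filename': 'busybox_1.24.1-r0_core2-64.ipk',
    #                      'deps': ['libc6'], 'pkgarch': ''}}
    return opkg_query(sample)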
class Indexer(object, metaclass=ABCMeta):
def __init__(self, d, deploy_dir):
self.d = d
self.deploy_dir = deploy_dir
@abstractmethod
def write_index(self):
pass
class RpmIndexer(Indexer):
def write_index(self):
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            raise NotImplementedError('Package feed signing not yet implemented for rpm')
createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
if result:
bb.fatal(result)
class OpkgIndexer(Indexer):
def write_index(self):
arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
"SDK_PACKAGE_ARCHS",
"MULTILIB_ARCHS"]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
open(os.path.join(self.deploy_dir, "Packages"), "w").close()
index_cmds = set()
index_sign_files = set()
for arch_var in arch_vars:
archs = self.d.getVar(arch_var)
if archs is None:
continue
for arch in archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
pkgs_file = os.path.join(pkgs_dir, "Packages")
if not os.path.isdir(pkgs_dir):
continue
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
index_cmds.add('%s -r %s -p %s -m %s' %
(opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
index_sign_files.add(pkgs_file)
if len(index_cmds) == 0:
bb.note("There are no packages in %s!" % self.deploy_dir)
return
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
if signer:
feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
is_ascii_sig = (feed_sig_type.upper() != "BIN")
for f in index_sign_files:
signer.detach_sign(f,
self.d.getVar('PACKAGE_FEED_GPG_NAME'),
self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)
class DpkgIndexer(Indexer):
def _create_configs(self):
bb.utils.mkdirhier(self.apt_conf_dir)
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
with open(os.path.join(self.apt_conf_dir, "preferences"),
"w") as prefs_file:
pass
with open(os.path.join(self.apt_conf_dir, "sources.list"),
"w+") as sources_file:
pass
with open(self.apt_conf_file, "w") as apt_conf:
with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
"apt", "apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
line = re.sub("#ROOTFS#", "/dev/null", line)
line = re.sub("#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
def write_index(self):
self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
"apt-ftparchive")
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
self._create_configs()
os.environ['APT_CONFIG'] = self.apt_conf_file
pkg_archs = self.d.getVar('PACKAGE_ARCHS')
if pkg_archs is not None:
arch_list = pkg_archs.split()
sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
if sdk_pkg_archs is not None:
for a in sdk_pkg_archs.split():
if a not in pkg_archs:
arch_list.append(a)
all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
gzip = bb.utils.which(os.getenv('PATH'), "gzip")
index_cmds = []
deb_dirs_found = False
for arch in arch_list:
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
continue
cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
cmd += "%s -fc Packages > Packages.gz;" % gzip
with open(os.path.join(arch_dir, "Release"), "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
index_cmds.append(cmd)
deb_dirs_found = True
if not deb_dirs_found:
bb.note("There are no packages in %s" % self.deploy_dir)
return
result = oe.utils.multiprocess_exec(index_cmds, create_index)
if result:
bb.fatal('%s' % ('\n'.join(result)))
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            raise NotImplementedError('Package feed signing not implemented for dpkg')
class PkgsList(object, metaclass=ABCMeta):
def __init__(self, d, rootfs_dir):
self.d = d
self.rootfs_dir = rootfs_dir
@abstractmethod
def list_pkgs(self):
pass
class RpmPkgsList(PkgsList):
def list_pkgs(self):
return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
super(OpkgPkgsList, self).__init__(d, rootfs_dir)
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
self.opkg_args += self.d.getVar("OPKG_ARGS")
def list_pkgs(self, format=None):
cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
# opkg returns success even when it printed some
# "Collected errors:" report to stderr. Mixing stderr into
# stdout then leads to random failures later on when
# parsing the output. To avoid this we need to collect both
# output streams separately and check for empty stderr.
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
cmd_output, cmd_stderr = p.communicate()
cmd_output = cmd_output.decode("utf-8")
cmd_stderr = cmd_stderr.decode("utf-8")
if p.returncode or cmd_stderr:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
return opkg_query(cmd_output)
class DpkgPkgsList(PkgsList):
def list_pkgs(self):
cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
"--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
"-W"]
cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
try:
cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get the installed packages list. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
return opkg_query(cmd_output)
class PackageManager(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
"""
def __init__(self, d):
self.d = d
self.deploy_dir = None
self.deploy_lock = None
"""
Update the package manager package database.
"""
@abstractmethod
def update(self):
pass
"""
Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
True, installation failures are ignored.
"""
@abstractmethod
def install(self, pkgs, attempt_only=False):
pass
"""
Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
    is False, any dependencies are left in place.
"""
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
pass
"""
This function creates the index files
"""
@abstractmethod
def write_index(self):
pass
@abstractmethod
def remove_packaging_data(self):
pass
@abstractmethod
def list_installed(self):
pass
"""
    Returns the path to a tmpdir where the contents of a package reside.
    Deleting the tmpdir is the responsibility of the caller.
"""
@abstractmethod
def extract(self, pkg):
pass
"""
Add remote package feeds into repository manager configuration. The parameters
for the feeds are set by feed_uris, feed_base_paths and feed_archs.
See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
for their description.
"""
@abstractmethod
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
pass
"""
Install complementary packages based upon the list of currently installed
packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
    these packages; if they don't exist, no error will occur. Note: every
backend needs to call this function explicitly after the normal package
installation
"""
def install_complementary(self, globs=None):
# we need to write the list of installed packages to a file because the
# oe-pkgdata-util reads it from a file
installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR'),
"installed_pkgs.txt")
with open(installed_pkgs_file, "w+") as installed_pkgs:
pkgs = self.list_installed()
output = oe.utils.format_pkg_list(pkgs, "arch")
installed_pkgs.write(output)
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
split_linguas = set()
for translation in self.d.getVar('IMAGE_LINGUAS').split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
split_linguas = sorted(split_linguas)
for lang in split_linguas:
globs += " *-locale-%s" % lang
if globs is None:
return
cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
"-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs_file,
globs]
exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
if exclude:
cmd.extend(['--exclude=' + '|'.join(exclude.split())])
try:
bb.note("Installing complementary packages ...")
bb.note('Running %s' % cmd)
complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
self.install(complementary_pkgs.split(), attempt_only=True)
os.remove(installed_pkgs_file)
def deploy_dir_lock(self):
if self.deploy_dir is None:
raise RuntimeError("deploy_dir is not set!")
lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
self.deploy_lock = bb.utils.lockfile(lock_file_name)
def deploy_dir_unlock(self):
if self.deploy_lock is None:
return
bb.utils.unlockfile(self.deploy_lock)
self.deploy_lock = None
"""
    Construct URIs based on the pattern uri/base_path, where 'uri' and
    'base_path' correspond to each element of the corresponding array
    arguments, leading to len(uris) x len(base_paths) elements in the
    returned list
"""
def construct_uris(self, uris, base_paths):
def _append(arr1, arr2, sep='/'):
res = []
narr1 = [a.rstrip(sep) for a in arr1]
narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
for a1 in narr1:
if arr2:
for a2 in narr2:
res.append("%s%s%s" % (a1, sep, a2))
else:
res.append(a1)
return res
return _append(uris, base_paths)
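# Editor's note (illustration only): with uris = ["http://example.com/feed"]
# and base_paths = ["core2-64", "all"], construct_uris() returns
# ["http://example.com/feed/core2-64", "http://example.com/feed/all"], i.e.
# one entry per (uri, base_path) pair with separators normalised.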
class RpmPM(PackageManager):
def __init__(self,
d,
target_rootfs,
target_vendor,
task_name='target',
providename=None,
arch_var=None,
os_var=None):
super(RpmPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.target_vendor = target_vendor
self.task_name = task_name
if arch_var == None:
self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
else:
self.archs = self.d.getVar(arch_var).replace("-","_")
if task_name == "host":
self.primary_arch = self.d.getVar('SDK_ARCH')
else:
self.primary_arch = self.d.getVar('MACHINE_ARCH')
self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), "oe-rootfs-repo")
bb.utils.mkdirhier(self.rpm_repo_dir)
oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
self.task_name)
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
def _configure_dnf(self):
# libsolv handles 'noarch' internally, we don't need to specify it explicitly
archs = [i for i in self.archs.split() if i not in ["any", "all", "noarch"]]
# This prevents accidental matching against libsolv's built-in policies
if len(archs) <= 1:
archs = archs + ["bogusarch"]
archconfdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
bb.utils.mkdirhier(archconfdir)
open(archconfdir + "arch", 'w').write(":".join(archs))
open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
def _configure_rpm(self):
# We need to configure rpm to use our primary package architecture as the installation architecture,
# and to make it compatible with other package architectures that we use.
# Otherwise it will refuse to proceed with packages installation.
platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
bb.utils.mkdirhier(platformconfdir)
open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
if self.d.getVar('RPM_PREFER_ELF_ARCH'):
open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
else:
open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Importing GPG key failed. Command '%s' "
"returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
def create_configs(self):
self._configure_dnf()
self._configure_rpm()
def write_index(self):
lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
lf = bb.utils.lockfile(lockfilename, False)
RpmIndexer(self.d, self.rpm_repo_dir).write_index()
bb.utils.unlockfile(lf)
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
from urllib.parse import urlparse
if feed_uris == "":
return
bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
for uri in remote_uris:
repo_name = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
if feed_archs is not None:
repo_uris = [uri + "/" + arch for arch in feed_archs]
else:
repo_uris = [uri]
open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_name + ".repo"), 'w').write("[%s]\nbaseurl=%s\n" % (repo_name, " ".join(repo_uris)))
def _prepare_pkg_transaction(self):
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
def install(self, pkgs, attempt_only = False):
if len(pkgs) == 0:
return
self._prepare_pkg_transaction()
bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
(["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
(["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == 1 else []) +
(["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
["install"] +
pkgs)
failed_scriptlets_pkgnames = collections.OrderedDict()
for line in output.splitlines():
if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
failed_scriptlets_pkgnames[line.split()[-1]] = True
for pkg in failed_scriptlets_pkgnames.keys():
self.save_rpmpostinst(pkg)
def remove(self, pkgs, with_dependencies = True):
if len(pkgs) == 0:
return
self._prepare_pkg_transaction()
if with_dependencies:
self._invoke_dnf(["remove"] + pkgs)
else:
cmd = bb.utils.which(os.getenv('PATH'), "rpm")
args = ["-e", "--nodeps", "--root=%s" %self.target_rootfs]
try:
output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Could not invoke rpm. Command "
"'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
def upgrade(self):
self._prepare_pkg_transaction()
self._invoke_dnf(["upgrade"])
def autoremove(self):
self._prepare_pkg_transaction()
self._invoke_dnf(["autoremove"])
def remove_packaging_data(self):
self._invoke_dnf(["clean", "all"])
for dir in self.packaging_data_dirs:
bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
def backup_packaging_data(self):
# Save the packaging dirs for increment rpm image generation
if os.path.exists(self.saved_packaging_data):
bb.utils.remove(self.saved_packaging_data, True)
for i in self.packaging_data_dirs:
source_dir = oe.path.join(self.target_rootfs, i)
target_dir = oe.path.join(self.saved_packaging_data, i)
shutil.copytree(source_dir, target_dir, symlinks=True)
def recovery_packaging_data(self):
# Move the rpmlib back
if os.path.exists(self.saved_packaging_data):
for i in self.packaging_data_dirs:
target_dir = oe.path.join(self.target_rootfs, i)
if os.path.exists(target_dir):
bb.utils.remove(target_dir, True)
source_dir = oe.path.join(self.saved_packaging_data, i)
shutil.copytree(source_dir,
target_dir,
symlinks=True)
def list_installed(self):
output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{sourcerpm}\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
print_output = False)
packages = {}
current_package = None
current_deps = None
current_state = "initial"
for line in output.splitlines():
if line.startswith("Package:"):
package_info = line.split(" ")[1:]
current_package = package_info[0]
package_arch = package_info[1]
package_version = package_info[2]
package_srpm = package_info[3]
packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_srpm}
current_deps = []
elif line.startswith("Dependencies:"):
current_state = "dependencies"
elif line.startswith("Recommendations"):
current_state = "recommendations"
elif line.startswith("DependenciesEndHere:"):
current_state = "initial"
packages[current_package]["deps"] = current_deps
elif len(line) > 0:
if current_state == "dependencies":
current_deps.append(line)
elif current_state == "recommendations":
current_deps.append("%s [REC]" % line)
return packages
def update(self):
self._invoke_dnf(["makecache"])
def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
"-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
"--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
"--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
"--installroot=%s" % (self.target_rootfs),
"--setopt=logdir=%s" % (self.d.getVar('T'))
]
cmd = [dnf_cmd] + standard_dnf_args + dnf_args
try:
output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
if print_output:
bb.note(output)
return output
except subprocess.CalledProcessError as e:
if print_output:
(bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
"'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
else:
(bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
"'%s' returned %d:" % (' '.join(cmd), e.returncode))
return e.output.decode("utf-8")
def dump_install_solution(self, pkgs):
open(self.solution_manifest, 'w').write(" ".join(pkgs))
return pkgs
def load_old_install_solution(self):
if not os.path.exists(self.solution_manifest):
return []
return open(self.solution_manifest, 'r').read().split()
def _script_num_prefix(self, path):
files = os.listdir(path)
numbers = set()
numbers.add(99)
for f in files:
numbers.add(int(f.split("-")[0]))
return max(numbers) + 1
def save_rpmpostinst(self, pkg):
bb.note("Saving postinstall script of %s" % (pkg))
cmd = bb.utils.which(os.getenv('PATH'), "rpm")
args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
try:
output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Could not invoke rpm. Command "
"'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
# may need to prepend #!/bin/sh to output
target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
bb.utils.mkdirhier(target_path)
num = self._script_num_prefix(target_path)
saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
open(saved_script_name, 'w').write(output)
os.chmod(saved_script_name, 0o755)
def extract(self, pkg):
output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
pkg_name = output.splitlines()[-1]
if not pkg_name.endswith(".rpm"):
bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
if not os.path.isfile(pkg_path):
bb.fatal("Unable to extract package for '%s'."
"File %s doesn't exists" % (pkg, pkg_path))
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
try:
cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
"returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
except OSError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
"returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
os.chdir(current_dir)
return tmp_dir
class OpkgDpkgPM(PackageManager):
"""
This is an abstract class. Do not instantiate this directly.
"""
def __init__(self, d):
super(OpkgDpkgPM, self).__init__(d)
"""
Returns a dictionary with the package info.
This method extracts the common parts for Opkg and Dpkg
"""
def package_info(self, pkg, cmd):
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Unable to list available packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
return opkg_query(output)
"""
    Returns the path to a tmpdir where the contents of a package reside.
    Deleting the tmpdir is the responsibility of the caller.
This method extracts the common parts for Opkg and Dpkg
"""
def extract(self, pkg, pkg_info):
ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
pkg_path = pkg_info[pkg]["filepath"]
if not os.path.isfile(pkg_path):
bb.fatal("Unable to extract package for '%s'."
"File %s doesn't exists" % (pkg, pkg_path))
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
data_tar = 'data.tar.xz'
else:
data_tar = 'data.tar.gz'
try:
cmd = [ar_cmd, 'x', pkg_path]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
cmd = [tar_cmd, 'xf', data_tar]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
"returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
except OSError as e:
bb.utils.remove(tmp_dir, recurse=True)
bb.fatal("Unable to extract %s package. Command '%s' "
"returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
os.chdir(current_dir)
return tmp_dir
class OpkgPM(OpkgDpkgPM):
def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
super(OpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.config_file = config_file
self.pkg_archs = archs
self.task_name = task_name
self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
self.opkg_args += self.d.getVar("OPKG_ARGS")
opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
bb.utils.mkdirhier(self.opkg_dir)
self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved')):
bb.utils.mkdirhier(self.d.expand('${T}/saved'))
self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
if self.from_feeds:
self._create_custom_config()
else:
self._create_config()
self.indexer = OpkgIndexer(self.d, self.deploy_dir)
"""
This function will change a package's status in /var/lib/opkg/status file.
If 'packages' is None then the new_status will be applied to all
packages
"""
def mark_packages(self, status_tag, packages=None):
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
if type(packages).__name__ != "list":
raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
def _create_custom_config(self):
bb.note("Building from feeds activated!")
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
feed_uri = feed_match.group(2)
bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
"""
Allow to use package deploy directory contents as quick devel-testing
feed. This creates individual feed configs for each arch subdir of those
specified as compatible for the current machine.
NOTE: Development-helper feature, NOT a full-fledged feed.
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
cfg_file_name = os.path.join(self.target_rootfs,
self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
with open(cfg_file_name, "w+") as cfg_file:
cfg_file.write("src/gz local-%s %s/%s" %
(arch,
self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
arch))
if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def _create_config(self):
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
config_file.write("src oe file:%s\n" % self.deploy_dir)
for arch in self.pkg_archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
if os.path.isdir(pkgs_dir):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
if feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in feed_uris:
if archs:
for arch in archs:
if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
continue
bb.note('Adding opkg feed url-%s-%d (%s)' %
(arch, uri_iterator, uri))
config_file.write("src/gz uri-%s-%d %s/%s\n" %
(arch, uri_iterator, uri, arch))
else:
bb.note('Adding opkg feed url-%d (%s)' %
(uri_iterator, uri))
config_file.write("src/gz uri-%d %s\n" %
(uri_iterator, uri))
uri_iterator += 1
def update(self):
self.deploy_dir_lock()
cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.deploy_dir_unlock()
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if not pkgs:
return
cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
else:
cmd = "%s %s --force-depends remove %s" % \
(self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
try:
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def remove_packaging_data(self):
bb.utils.remove(self.opkg_dir, True)
# create the directory back, it's needed by PM lock
bb.utils.mkdirhier(self.opkg_dir)
def remove_lists(self):
if not self.from_feeds:
bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
def list_installed(self):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
def handle_bad_recommendations(self):
bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
if bad_recommendations.strip() == "":
return
status_file = os.path.join(self.opkg_dir, "status")
        # If the status file already exists, the bad recommendations have
        # already been handled
if os.path.exists(status_file):
return
cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
with open(status_file, "w+") as status:
for pkg in bad_recommendations.split():
pkg_info = cmd + pkg
try:
output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
except subprocess.CalledProcessError as e:
bb.fatal("Cannot get package info. Command '%s' "
"returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
if output == "":
bb.note("Ignored bad recommendation: '%s' is "
"not a package" % pkg)
continue
for line in output.split('\n'):
if line.startswith("Status:"):
status.write("Status: deinstall hold not-installed\n")
else:
status.write(line + "\n")
# Append a blank line after each package entry to ensure that it
# is separated from the following entry
status.write("\n")
'''
    The following function performs a dummy (--noaction) install of pkgs and returns the log output.
'''
def dummy_install(self, pkgs):
if len(pkgs) == 0:
return
        # Create a temp dir as the opkg root for the dummy installation
temp_rootfs = self.d.expand('${T}/opkg')
opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
if opkg_lib_dir[0] == "/":
opkg_lib_dir = opkg_lib_dir[1:]
temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
bb.utils.mkdirhier(temp_opkg_dir)
opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
opkg_args += self.d.getVar("OPKG_ARGS")
cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
# Dummy installation
cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
opkg_args,
' '.join(pkgs))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to dummy install packages. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
bb.utils.remove(temp_rootfs, True)
return output
def backup_packaging_data(self):
        # Save the opkglib for incremental ipk image generation
if os.path.exists(self.saved_opkg_dir):
bb.utils.remove(self.saved_opkg_dir, True)
shutil.copytree(self.opkg_dir,
self.saved_opkg_dir,
symlinks=True)
def recover_packaging_data(self):
# Move the opkglib back
if os.path.exists(self.saved_opkg_dir):
if os.path.exists(self.opkg_dir):
bb.utils.remove(self.opkg_dir, True)
bb.note('Recover packaging data')
shutil.copytree(self.saved_opkg_dir,
self.opkg_dir,
symlinks=True)
"""
Returns a dictionary with the package info.
"""
def package_info(self, pkg):
cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
pkg_arch = pkg_info[pkg]["arch"]
pkg_filename = pkg_info[pkg]["filename"]
pkg_info[pkg]["filepath"] = \
os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
return pkg_info
"""
    Returns the path to a tmpdir where the contents of a package reside.
    Deleting the tmpdir is the responsibility of the caller.
"""
def extract(self, pkg):
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
return tmp_dir
class DpkgPM(OpkgDpkgPM):
def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
super(DpkgPM, self).__init__(d)
self.target_rootfs = target_rootfs
self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
if apt_conf_dir is None:
self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
else:
self.apt_conf_dir = apt_conf_dir
self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
self.apt_args = d.getVar("APT_ARGS")
self.all_arch_list = archs.split()
all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
self._create_configs(archs, base_archs)
self.indexer = DpkgIndexer(self.d, self.deploy_dir)
"""
This function will change a package's status in /var/lib/dpkg/status file.
If 'packages' is None then the new_status will be applied to all
packages
"""
def mark_packages(self, status_tag, packages=None):
status_file = self.target_rootfs + "/var/lib/dpkg/status"
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
if type(packages).__name__ != "list":
raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
os.rename(status_file + ".tmp", status_file)
"""
Run the pre/post installs for package "package_name". If package_name is
None, then run all pre/post install scriptlets.
"""
def run_pre_post_installs(self, package_name=None):
info_dir = self.target_rootfs + "/var/lib/dpkg/info"
ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
control_scripts = [
ControlScript(".preinst", "Preinstall", "install"),
ControlScript(".postinst", "Postinstall", "configure")]
status_file = self.target_rootfs + "/var/lib/dpkg/status"
installed_pkgs = []
with open(status_file, "r") as status:
for line in status.read().split('\n'):
m = re.match("^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
if package_name is not None and not package_name in installed_pkgs:
return
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
"intercept_scripts")
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
failed_pkgs = []
for pkg_name in installed_pkgs:
for control_script in control_scripts:
p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
if os.path.exists(p_full):
try:
bb.note("Executing %s for package: %s ..." %
(control_script.name.lower(), pkg_name))
output = subprocess.check_output([p_full, control_script.argument],
stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
except subprocess.CalledProcessError as e:
bb.note("%s for package %s failed with %d:\n%s" %
(control_script.name, pkg_name, e.returncode,
e.output.decode("utf-8")))
failed_pkgs.append(pkg_name)
break
if len(failed_pkgs):
self.mark_packages("unpacked", failed_pkgs)
def update(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
self.deploy_dir_lock()
cmd = "%s update" % self.apt_get_cmd
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False):
if attempt_only and len(pkgs) == 0:
return
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
(self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
(bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
                new_dir = re.sub(r"\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
                new_file = re.sub(r"\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
def remove(self, pkgs, with_dependencies=True):
if with_dependencies:
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
else:
cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
" -P --force-depends %s" % \
(bb.utils.which(os.getenv('PATH'), "dpkg"),
self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Unable to remove packages. Command '%s' "
"returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
def write_index(self):
self.deploy_dir_lock()
result = self.indexer.write_index()
self.deploy_dir_unlock()
if result is not None:
bb.fatal(result)
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
if feed_uris == "":
return
sources_conf = os.path.join("%s/etc/apt/sources.list"
% self.target_rootfs)
arch_list = []
if feed_archs is None:
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
else:
arch_list = feed_archs.split()
feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
with open(sources_conf, "w+") as sources_file:
for uri in feed_uris:
if arch_list:
for arch in arch_list:
bb.note('Adding dpkg channel at (%s)' % uri)
sources_file.write("deb %s/%s ./\n" %
(uri, arch))
else:
bb.note('Adding dpkg channel at (%s)' % uri)
sources_file.write("deb %s ./\n" % uri)
def _create_configs(self, archs, base_archs):
base_archs = re.sub("_", "-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
bb.utils.mkdirhier(self.apt_conf_dir)
bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
arch_list = []
for arch in self.all_arch_list:
if not os.path.exists(os.path.join(self.deploy_dir, arch)):
continue
arch_list.append(arch)
with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
priority = 801
for arch in arch_list:
prefs_file.write(
"Package: *\n"
"Pin: release l=%s\n"
"Pin-Priority: %d\n\n" % (arch, priority))
priority += 5
pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
for pkg in pkg_exclude.split():
prefs_file.write(
"Package: %s\n"
"Pin: release *\n"
"Pin-Priority: -1\n\n" % pkg)
arch_list.reverse()
with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
for arch in arch_list:
sources_file.write("deb file:%s/ ./\n" %
os.path.join(self.deploy_dir, arch))
base_arch_list = base_archs.split()
multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
for variant in multilib_variants.split():
localdata = bb.data.createCopy(self.d)
variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
orig_arch = localdata.getVar("DPKG_ARCH")
localdata.setVar("DEFAULTTUNE", variant_tune)
variant_arch = localdata.getVar("DPKG_ARCH")
if variant_arch not in base_arch_list:
base_arch_list.append(variant_arch)
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
match_arch = re.match(" Architecture \".*\";$", line)
architectures = ""
if match_arch:
for base_arch in base_arch_list:
architectures += "\"%s\";" % base_arch
apt_conf.write(" Architectures {%s};\n" % architectures);
apt_conf.write(" Architecture \"%s\";\n" % base_archs)
else:
line = re.sub("#ROOTFS#", self.target_rootfs, line)
line = re.sub("#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
open(os.path.join(target_dpkg_dir, "status"), "w+").close()
if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
open(os.path.join(target_dpkg_dir, "available"), "w+").close()
def remove_packaging_data(self):
bb.utils.remove(os.path.join(self.target_rootfs,
self.d.getVar('opkglibdir')), True)
bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
def fix_broken_dependencies(self):
os.environ['APT_CONFIG'] = self.apt_conf_file
cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
bb.fatal("Cannot fix broken dependencies. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
def list_installed(self):
return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
"""
Returns a dictionary with the package info.
"""
def package_info(self, pkg):
cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
pkg_arch = pkg_info[pkg]["pkgarch"]
pkg_filename = pkg_info[pkg]["filename"]
pkg_info[pkg]["filepath"] = \
os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
return pkg_info
"""
    Returns the path to a tmpdir where the contents of a package reside.
    Deleting the tmpdir is the responsibility of the caller.
"""
def extract(self, pkg):
pkg_info = self.package_info(pkg)
if not pkg_info:
bb.fatal("Unable to get information for package '%s' while "
"trying to extract the package." % pkg)
tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
return tmp_dir
def generate_index_files(d):
classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
indexer_map = {
"rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
"ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
"deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
}
result = None
for pkg_class in classes:
if not pkg_class in indexer_map:
continue
if os.path.exists(indexer_map[pkg_class][1]):
result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
if result is not None:
bb.fatal(result)
if __name__ == "__main__":
"""
We should be able to run this as a standalone script, from outside bitbake
environment.
"""
"""
TBD
"""
| gpl-2.0 | -3,319,517,105,666,360,300 | 40.249207 | 217 | 0.545288 | false |
crlane/helga-xkcd | tests/test_command.py | 1 | 1204 | import mock
import pytest
from functools import partial
from helga_xkcd import command
MOCK_CLIENT = 'irc'
MOCK_CHANNEL = 'bots'
MOCK_NICK = 'crlane'
MOCK_MESSAGE = 'foobar'
MOCK_CMD = '!xkcd'
MOCK_ARGS = [
MOCK_CLIENT,
MOCK_CHANNEL,
MOCK_NICK,
MOCK_MESSAGE,
MOCK_CMD
]
@pytest.fixture
def mock_helga_command():
'''A partial helga command hook'''
return partial(command.xkcd, *MOCK_ARGS)
@pytest.fixture
def mock_subcommand():
return mock.Mock()
@pytest.mark.parametrize('args,expected_subcommand', [
((), 'latest_comic_command'),
(('latest',), 'latest_comic_command'),
(('random',), 'random_comic_command'),
(('refresh',), 'refresh_db_command'),
(('refresh', 10), 'refresh_db_command'),
(('about', 10), 'comic_about_command'),
(('number', 10), 'comic_number_command'),
], ids=['empty', 'latest', 'random', 'refresh-empty', 'refresh-args', 'about-args', 'number-args'])
def test_command_calls_correct_subcommand(args, expected_subcommand, monkeypatch, mock_subcommand, mock_helga_command):
monkeypatch.setattr(command, expected_subcommand, mock_subcommand)
mock_helga_command(args)
assert mock_subcommand.call_count == 1
| mit | -3,742,131,429,410,104,000 | 24.617021 | 119 | 0.669435 | false |
pity7736/olimpiadas | tasks/mutations.py | 1 | 1169 | from graphene import InputObjectType, String, Int, Mutation, Boolean, Field
from graphene.types.datetime import DateTime
from tasks.models import Task, Category
from tasks.schemas import TaskNode, CategoryNode
class NameInput(InputObjectType):
name = String(required=True)
description = String()
class CreateCategoryInput(NameInput):
pass
class CreateCategoryMutation(Mutation):
ok = Boolean()
category = Field(CategoryNode)
class Arguments:
category_data = CreateCategoryInput()
def mutate(self, info, category_data):
category = Category.objects.create(**category_data)
return CreateCategoryMutation(ok=True, category=category)
class CreateTaskInput(NameInput):
name = String(required=True)
description = String()
owner_id = Int(required=True)
category_id = Int(required=True)
deadline = DateTime()
class CreateTaskMutation(Mutation):
ok = Boolean()
task = Field(TaskNode)
class Arguments:
task_data = CreateTaskInput()
def mutate(self, info, task_data):
task = Task.objects.create(**task_data)
return CreateTaskMutation(ok=True, task=task)
| gpl-3.0 | -1,950,752,744,791,763,700 | 24.413043 | 75 | 0.711719 | false |
hes19073/hesweewx | bin/weecfg/database.py | 1 | 26730 | #
# Copyright (c) 2009-2019 Tom Keffer <[email protected]> and
# Gary Roderick <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
"""Classes to support fixes or other bulk corrections of weewx data."""
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# standard python imports
import datetime
import logging
import sys
import time
# weewx imports
import weedb
import weeutil.weeutil
import weewx.engine
import weewx.manager
import weewx.units
import weewx.wxservices
from weeutil.weeutil import timestamp_to_string, startOfDay, to_bool, option_as_list
log = logging.getLogger(__name__)
# ============================================================================
# class DatabaseFix
# ============================================================================
class DatabaseFix(object):
"""Base class for fixing bulk data in the weewx database.
    Classes for applying different fixes to the weewx database data should be
derived from this class. Derived classes require:
run() method: The entry point to apply the fix.
fix config dict: Dictionary containing config data specific to
the fix. Minimum fields required are:
name. The name of the fix. String.
"""
def __init__(self, config_dict, fix_config_dict):
"""A generic initialisation."""
# save our weewx config dict
self.config_dict = config_dict
# save our fix config dict
self.fix_config_dict = fix_config_dict
# get our name
self.name = fix_config_dict['name']
# is this a dry run
self.dry_run = to_bool(fix_config_dict.get('dry_run', True))
# Get the binding for the archive we are to use. If we received an
# explicit binding then use that otherwise use the binding that
# StdArchive uses.
try:
db_binding = fix_config_dict['binding']
except KeyError:
if 'StdArchive' in config_dict:
db_binding = config_dict['StdArchive'].get('data_binding',
'wx_binding')
else:
db_binding = 'wx_binding'
self.binding = db_binding
# get a database manager object
self.dbm = weewx.manager.open_manager_with_config(config_dict,
self.binding)
def run(self):
raise NotImplementedError("Method 'run' not implemented")
def genSummaryDaySpans(self, start_ts, stop_ts, obs='outTemp'):
"""Generator to generate a sequence of daily summary day TimeSpans.
Given an observation that has a daily summary table, generate a
sequence of TimeSpan objects for each row in the daily summary table.
In this way the generated sequence includes only rows included in the
daily summary rather than any 'missing' rows.
Input parameters:
start_ts: Include daily summary rows with a dateTime >= start_ts
            stop_ts: Include daily summary rows with a dateTime <= stop_ts
obs: The weewx observation whose daily summary table is to be
used as the source of the TimeSpan objects
Returns:
A sequence of day TimeSpan objects
"""
_sql = "SELECT dateTime FROM %s_day_%s " \
" WHERE dateTime >= ? AND dateTime <= ?" % (self.dbm.table_name, obs)
_cursor = self.dbm.connection.cursor()
try:
for _row in _cursor.execute(_sql, (start_ts, stop_ts)):
yield weeutil.weeutil.archiveDaySpan(_row[0], grace=0)
finally:
_cursor.close()
def first_summary_ts(self, obs_type):
"""Obtain the timestamp of the earliest daily summary entry for an
observation type.
        Input:
obs_type: The observation type whose daily summary is to be checked.
Returns:
            The timestamp of the earliest daily summary entry for the obs_type
            observation. None is returned if no record could be found.
"""
_sql_str = "SELECT MIN(dateTime) FROM %s_day_%s" % (self.dbm.table_name,
obs_type)
_row = self.dbm.getSql(_sql_str)
if _row:
return _row[0]
return None
@staticmethod
def _progress(record, ts):
"""Utility function to show our progress while processing the fix.
Override in derived class to provide a different progress display.
To do nothing override with a pass statement.
"""
_msg = "Fixing database record: %d; Timestamp: %s\r" % (record, timestamp_to_string(ts))
print(_msg, end='', file=sys.stdout)
sys.stdout.flush()
# ============================================================================
# class WindSpeedRecalculation
# ============================================================================
class WindSpeedRecalculation(DatabaseFix):
"""Class to recalculate windSpeed daily maximum value. To recalculate the
windSpeed daily maximum values:
1. Create a dictionary of parameters required by the fix. The
WindSpeedRecalculation class uses the following parameters as indicated:
name: Name of the fix, for the windSpeed recalculation fix
this is 'windSpeed Recalculation'. String. Mandatory.
binding: The binding of the database to be fixed. Default is
the binding specified in weewx.conf [StdArchive].
String, eg 'binding_name'. Optional.
trans_days: Number of days of data used in each database
transaction. Integer, default is 50. Optional.
dry_run: Process the fix as if it was being applied but do not
write to the database. Boolean, default is True.
Optional.
    2. Create a WindSpeedRecalculation object, passing it a weewx config dict
and a fix config dict.
3. Call the resulting object's run() method to apply the fix.
"""
def __init__(self, config_dict, fix_config_dict):
"""Initialise our WindSpeedRecalculation object."""
# call our parents __init__
super(WindSpeedRecalculation, self).__init__(config_dict, fix_config_dict)
# log if a dry run
if self.dry_run:
log.info("maxwindspeed: This is a dry run. "
"Maximum windSpeed will be recalculated but not saved.")
log.debug("maxwindspeed: Using database binding '%s', "
"which is bound to database '%s'." %
(self.binding, self.dbm.database_name))
# number of days per db transaction, default to 50.
self.trans_days = int(fix_config_dict.get('trans_days', 50))
log.debug("maxwindspeed: Database transactions will use %s days of data." % self.trans_days)
def run(self):
"""Main entry point for applying the windSpeed Calculation fix.
Recalculating the windSpeed daily summary max field from archive data
is idempotent so there is no need to check whether the fix has already
been applied. Just go ahead and do it catching any exceptions we know
may be raised.
"""
# apply the fix but be prepared to catch any exceptions
try:
self.do_fix()
except weedb.NoTableError:
raise
except weewx.ViolatedPrecondition as e:
log.error("maxwindspeed: %s not applied: %s" % (self.name, e))
# raise the error so caller can deal with it if they want
raise
def do_fix(self):
"""Recalculate windSpeed daily summary max field from archive data.
Step through each row in the windSpeed daily summary table and replace
the max field with the max value for that day based on archive data.
Database transactions are done in self.trans_days days at a time.
"""
t1 = time.time()
log.info("maxwindspeed: Applying %s..." % self.name)
# get the start and stop Gregorian day number
start_ts = self.first_summary_ts('windSpeed')
if not start_ts:
print("Database empty. Nothing done.")
return
start_greg = weeutil.weeutil.toGregorianDay(start_ts)
stop_greg = weeutil.weeutil.toGregorianDay(self.dbm.last_timestamp)
# initialise a few things
day = start_greg
n_days = 0
last_start = None
while day <= stop_greg:
# get the start and stop timestamps for this tranche
tr_start_ts = weeutil.weeutil.startOfGregorianDay(day)
tr_stop_ts = weeutil.weeutil.startOfGregorianDay(day + self.trans_days - 1)
# start the transaction
with weedb.Transaction(self.dbm.connection) as _cursor:
# iterate over the rows in the windSpeed daily summary table
for day_span in self.genSummaryDaySpans(tr_start_ts, tr_stop_ts, 'windSpeed'):
# get the days max windSpeed and the time it occurred from
# the archive
(day_max_ts, day_max) = self.get_archive_span_max(day_span, 'windSpeed')
# now save the value and time in the applicable row in the
# windSpeed daily summary, but only if its not a dry run
if not self.dry_run:
self.write_max('windSpeed', day_span.start,
day_max, day_max_ts)
# increment our days done counter
n_days += 1
# give the user some information on progress
if n_days % 50 == 0:
self._progress(n_days, day_span.start)
last_start = day_span.start
# advance to the next tranche
day += self.trans_days
# we have finished, give the user some final information on progress,
# mainly so the total tallies with the log
self._progress(n_days, last_start)
print(file=sys.stdout)
tdiff = time.time() - t1
# We are done so log and inform the user
log.info("maxwindspeed: Maximum windSpeed calculated "
"for %s days in %0.2f seconds." % (n_days, tdiff))
if self.dry_run:
log.info("maxwindspeed: This was a dry run. %s was not applied." % self.name)
def get_archive_span_max(self, span, obs):
"""Find the max value of an obs and its timestamp in a span based on
archive data.
Gets the max value of an observation and the timestamp at which it
occurred from a TimeSpan of archive records. Raises a
weewx.ViolatedPrecondition error if the max value of the observation
field could not be determined.
Input parameters:
span: TimesSpan object of the period from which to determine
the interval value.
obs: The observation to be used.
Returns:
A tuple of the format:
(timestamp, value)
where:
timestamp is the epoch timestamp when the max value occurred
value is the max value of the observation over the time span
If no observation field values are found then a
weewx.ViolatedPrecondition error is raised.
"""
select_str = "SELECT dateTime, %(obs_type)s FROM %(table_name)s " \
"WHERE dateTime > %(start)s AND dateTime <= %(stop)s AND " \
"%(obs_type)s = (SELECT MAX(%(obs_type)s) FROM %(table_name)s " \
"WHERE dateTime > %(start)s and dateTime <= %(stop)s) AND " \
"%(obs_type)s IS NOT NULL"
interpolate_dict = {'obs_type': obs,
'table_name': self.dbm.table_name,
'start': span.start,
'stop': span.stop}
_row = self.dbm.getSql(select_str % interpolate_dict)
if _row:
try:
return _row[0], _row[1]
except IndexError:
_msg = "'%s' field not found in archive day %s." % (obs, span)
raise weewx.ViolatedPrecondition(_msg)
else:
return None, None
def write_max(self, obs, row_ts, value, when_ts, cursor=None):
"""Update the max and maxtime fields in an existing daily summary row.
Updates the max and maxtime fields in a row in a daily summary table.
Input parameters:
obs: The observation to be used. the daily summary updated will
be xxx_day_obs where xxx is the database archive table name.
row_ts: Timestamp of the row to be updated.
value: The value to be saved in field max
when_ts: The timestamp to be saved in field maxtime
cursor: Cursor object for the database connection being used.
Returns:
Nothing.
"""
_cursor = cursor or self.dbm.connection.cursor()
max_update_str = "UPDATE %s_day_%s SET %s=?,%s=? " \
"WHERE datetime=?" % (self.dbm.table_name, obs, 'max', 'maxtime')
_cursor.execute(max_update_str, (value, when_ts, row_ts))
if cursor is None:
_cursor.close()
@staticmethod
def _progress(ndays, last_time):
"""Utility function to show our progress while processing the fix."""
_msg = "Updating 'windSpeed' daily summary: %d; " \
"Timestamp: %s\r" % (ndays, timestamp_to_string(last_time, format_str="%Y-%m-%d"))
print(_msg, end='', file=sys.stdout)
sys.stdout.flush()
# ============================================================================
# class CalcMissing
# ============================================================================
class CalcMissing(DatabaseFix):
"""Class to calculate and store missing derived observations.
The following algorithm is used to calculate and store missing derived
observations:
1. Obtain a wxservices.WXCalculate() object to calculate the derived obs
fields for each record
2. Iterate over each day and record in the period concerned augmenting
each record with derived fields. Any derived fields that are missing
       or None are calculated. Days are processed in tranches and the
       updated derived fields for each tranche are committed as a single db
       transaction.
    3. Once all days/records have been processed the daily summaries for the
period concerned are recalculated.
"""
def __init__(self, config_dict, calc_missing_config_dict):
"""Initialise a CalcMissing object.
config_dict: WeeWX config file as a dict
calc_missing_config_dict: A config dict with the following structure:
name: A descriptive name for the class
binding: data binding to use
start_ts: start ts of timespan over which missing derived fields
will be calculated
stop_ts: stop ts of timespan over which missing derived fields
will be calculated
trans_days: number of days of records per db transaction
dry_run: is this a dry run (boolean)
"""
# call our parents __init__
super(CalcMissing, self).__init__(config_dict, calc_missing_config_dict)
# the start timestamp of the period to calc missing
self.start_ts = int(calc_missing_config_dict.get('start_ts'))
# the stop timestamp of the period to calc missing
self.stop_ts = int(calc_missing_config_dict.get('stop_ts'))
        # number of days per db transaction, default to 10.
self.trans_days = int(calc_missing_config_dict.get('trans_days', 10))
# is this a dry run, default to true
self.dry_run = to_bool(calc_missing_config_dict.get('dry_run', True))
self.config_dict = config_dict
def run(self):
"""Main entry point for calculating missing derived fields.
Calculate the missing derived fields for the timespan concerned, save
the calculated data to archive and recalculate the daily summaries.
"""
# record the current time
t1 = time.time()
# Instantiate a dummy engine, to be used to calculate derived variables. This will
# cause all the xtype services to get loaded.
engine = weewx.engine.DummyEngine(self.config_dict)
# While the above instantiated an instance of StdWXCalculate, we have no way of
# retrieving it. So, instantiate another one, then use that to calculate derived types.
wxcalculate = weewx.wxservices.StdWXCalculate(engine, self.config_dict)
# initialise some counters so we know what we have processed
days_updated = 0
days_processed = 0
total_records_processed = 0
total_records_updated = 0
# obtain gregorian days for our start and stop timestamps
start_greg = weeutil.weeutil.toGregorianDay(self.start_ts)
stop_greg = weeutil.weeutil.toGregorianDay(self.stop_ts)
# start at the first day
day = start_greg
while day <= stop_greg:
# get the start and stop timestamps for this tranche
tr_start_ts = weeutil.weeutil.startOfGregorianDay(day)
tr_stop_ts = min(weeutil.weeutil.startOfGregorianDay(stop_greg + 1),
weeutil.weeutil.startOfGregorianDay(day + self.trans_days))
# start the transaction
with weedb.Transaction(self.dbm.connection) as _cursor:
# iterate over each day in the tranche we are to work in
for tranche_day in weeutil.weeutil.genDaySpans(tr_start_ts, tr_stop_ts):
# initialise a counter for records processed on this day
records_updated = 0
# iterate over each record in this day
for record in self.dbm.genBatchRecords(startstamp=tranche_day.start,
stopstamp=tranche_day.stop):
# but we are only concerned with records after the
# start and before or equal to the stop timestamps
if self.start_ts < record['dateTime'] <= self.stop_ts:
# first obtain a list of the fields that may be calculated
extras_list = []
for obs in wxcalculate.calc_dict:
directive = wxcalculate.calc_dict[obs]
if directive == 'software' \
or directive == 'prefer_hardware' \
and (obs not in record or record[obs] is None):
extras_list.append(obs)
# calculate the missing derived fields for the record
wxcalculate.do_calculations(record)
# Obtain a new record dictionary that contains only those items
# that wxcalculate calculated. Use dictionary comprehension.
extras_dict = {k:v for (k,v) in record.items() if k in extras_list}
# update the archive with the calculated data
records_updated += self.update_record_fields(record['dateTime'],
extras_dict)
# update the total records processed
total_records_processed += 1
# Give the user some information on progress
if total_records_processed % 1000 == 0:
p_msg = "Processing record: %d; Last record: %s" % (total_records_processed,
timestamp_to_string(record['dateTime']))
self._progress(p_msg)
# update the total records updated
total_records_updated += records_updated
# if we updated any records on this day increment the count
# of days updated
days_updated += 1 if records_updated > 0 else 0
days_processed += 1
# advance to the next tranche
day += self.trans_days
# finished, so give the user some final information on progress, mainly
# so the total tallies with the log
p_msg = "Processing record: %d; Last record: %s" % (total_records_processed,
timestamp_to_string(tr_stop_ts))
self._progress(p_msg, overprint=False)
# now update the daily summaries, but only if this is not a dry run
if not self.dry_run:
print("Recalculating daily summaries...")
# first we need a start and stop date object
start_d = datetime.date.fromtimestamp(self.start_ts)
# Since each daily summary is identified by the midnight timestamp
            # for that day we need to make sure our stop timestamp is not on
            # a midnight boundary or we will rebuild the following day's summary
            # as well. If it is on a midnight boundary just subtract 1 second
# and use that.
summary_stop_ts = self.stop_ts
if weeutil.weeutil.isMidnight(self.stop_ts):
summary_stop_ts -= 1
stop_d = datetime.date.fromtimestamp(summary_stop_ts)
# do the update
self.dbm.backfill_day_summary(start_d=start_d, stop_d=stop_d)
print(file=sys.stdout)
print("Finished recalculating daily summaries")
else:
# it's a dry run so say the rebuild was skipped
print("This is a dry run, recalculation of daily summaries was skipped")
tdiff = time.time() - t1
# we are done so log and inform the user
_day_processed_str = "day" if days_processed == 1 else "days"
_day_updated_str = "day" if days_updated == 1 else "days"
if not self.dry_run:
log.info("Processed %d %s consisting of %d records. "
"%d %s consisting of %d records were updated "
"in %0.2f seconds." % (days_processed,
_day_processed_str,
total_records_processed,
days_updated,
_day_updated_str,
total_records_updated,
tdiff))
else:
# this was a dry run
log.info("Processed %d %s consisting of %d records. "
"%d %s consisting of %d records would have been updated "
"in %0.2f seconds." % (days_processed,
_day_processed_str,
total_records_processed,
days_updated,
_day_updated_str,
total_records_updated,
tdiff))
def update_record_fields(self, ts, record, cursor=None):
"""Update multiple fields in a given archive record.
Updates multiple fields in an archive record via an update query.
Inputs:
ts: epoch timestamp of the record to be updated
record: dict containing the updated data in field name-value pairs
cursor: sqlite cursor
"""
# Only data types that appear in the database schema can be
# updated. To find them, form the intersection between the set of
# all record keys and the set of all sql keys
record_key_set = set(record.keys())
update_key_set = record_key_set.intersection(self.dbm.sqlkeys)
# only update if we have data for at least one field that is in the schema
if len(update_key_set) > 0:
# convert to an ordered list
key_list = list(update_key_set)
# get the values in the same order
value_list = [record[k] for k in key_list]
# Construct the SQL update statement. First construct the 'SET'
# argument, we want a string of comma separated `field_name`=?
# entries. Each ? will be replaced by a value from update value list
# when the SQL statement is executed. We should not see any field
# names that are SQLite/MySQL reserved words (eg interval) but just
# in case enclose field names in backquotes.
set_str = ','.join(["`%s`=?" % k for k in key_list])
# form the SQL update statement
sql_update_stmt = "UPDATE %s SET %s WHERE dateTime=%s" % (self.dbm.table_name,
set_str,
ts)
# obtain a cursor if we don't have one
_cursor = cursor or self.dbm.connection.cursor()
# execute the update statement but only if its not a dry run
if not self.dry_run:
_cursor.execute(sql_update_stmt, value_list)
# close the cursor is we opened one
if cursor is None:
_cursor.close()
# if we made it here the record was updated so return the number of
# records updated which will always be 1
return 1
# there were no fields to update so return 0
return 0
@staticmethod
def _progress(message, overprint=True):
"""Utility function to show our progress."""
if overprint:
print(message + "\r", end='')
else:
print(message)
sys.stdout.flush()
| gpl-3.0 | -292,630,650,972,100,030 | 44.770548 | 120 | 0.557651 | false |
nfletton/bvspca | bvspca/animals/petpoint.py | 1 | 4840 | import datetime
import html
import io
import logging
from lxml import etree
logger = logging.getLogger('bvspca.animals.petpoint')
error_logger = logging.getLogger('bvspca.animals.petpoint.errors')
def extract_animal_ids(animal_summary_etree):
animal_ids = []
for animal in animal_summary_etree.findall('.//adoptableSearch'):
id = animal.find('ID')
if id is not None:
animal_ids.append(int(id.text))
return animal_ids
def fetch_petpoint_adoptable_animal_ids(session, base_url):
params = {
'speciesID': 0,
'sex': 'All',
'ageGroup': 'All',
'location': '',
'site': 0,
'onHold': 'A',
'orderBy': 'ID',
'primaryBreed': '',
'secondaryBreed': '',
'SpecialNeeds': 'A',
'noDogs': 'A',
'noCats': 'A',
'noKids': 'A',
'stageID': '',
}
adoptable_search_response = session.get(base_url.format('AdoptableSearch'), params=params)
    if adoptable_search_response.status_code == 200:
return extract_animal_ids(etree.parse(io.BytesIO(adoptable_search_response.content)))
else:
error_logger.error(
'Failed to retrieve adoptable animals. HTTP status code {}'.format(
adoptable_search_response.status_code,
)
)
def extract_animal_adoption_dates(adoptions_etree):
"""
    Extract animal ids and adoption dates from the etree response of
    PetPoint's AdoptionList API call
    :param adoptions_etree: parsed etree of the AdoptionList response
:return: a list of animal id and adoption date tuples
"""
animal_adoptions = []
for adoption in adoptions_etree.findall('.//adoption'):
id = adoption.find('AnimalID')
adoption_date = adoption.find('AdoptionDate')
if id is not None:
animal_adoptions.append(
(
int(id.text),
datetime.datetime.strptime(adoption_date.text[:10], '%Y-%m-%d').date()
)
)
return animal_adoptions
def fetch_petpoint_adopted_dates_since(session, base_url, start_date):
"""
From the start date, retrieve all adopted animals
:param session: requests session object
:param base_url: base url of petpoint services
:param start_date: the date to start checking for adoptions
:return: a list of animal id and adoption date tuples
"""
one_day_delta = datetime.timedelta(days=1)
loop_date = start_date
end_date = datetime.date.today()
all_adoptions = []
while loop_date <= end_date:
params = {
'adoptionDate': loop_date,
'siteID': '',
}
adoption_list_response = session.get(base_url.format('AdoptionList'), params=params)
        if adoption_list_response.status_code == 200:
all_adoptions.extend(extract_animal_adoption_dates(etree.parse(io.BytesIO(adoption_list_response.content))))
else:
error_logger.error(
'Failed to retrieve adopted animals for day {}. HTTP status code {}'.format(
loop_date,
adoption_list_response.status_code,
)
)
loop_date += one_day_delta
return all_adoptions
def extract_animal(animal_detail_etree):
return PetPointAnimal(animal_detail_etree)
def fetch_petpoint_animal(session, base_url, animal_id):
params = {
'animalID': animal_id,
}
animal_details_response = session.get(base_url.format('AdoptableDetails'), params=params)
    if animal_details_response.status_code == 200:
return extract_animal(etree.parse(io.BytesIO(animal_details_response.content)))
else:
error_logger.error(
'Failed to retrieve animal {} details. HTTP status code: {}. Reason: {}'.format(
animal_id,
animal_details_response.status_code,
animal_details_response.reason,
)
)
class PetPointAnimal:
def __init__(self, element):
self.element = element
def __getattr__(self, propName):
property_element = self.element.find(propName)
if property_element is not None:
property_value = property_element.text.strip()
if propName in ['ID', 'Age']:
return int(property_value)
if propName in ['DateOfSurrender', 'LastIntakeDate']:
return datetime.datetime.strptime(property_value[:10], '%Y-%m-%d').date()
if propName in ['NoDogs', 'NoCats', 'NoKids']:
return True if property_value == 'Y' else False
if propName in ['OnHold']:
return True if property_value == 'Yes' else False
if property_value is None:
return ''
return html.unescape(property_value)
return None
| mit | -7,226,197,578,575,516,000 | 32.846154 | 120 | 0.597727 | false |
tboyce1/home-assistant | homeassistant/components/panel_iframe.py | 2 | 1256 | """
Register an iFrame front end panel.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/panel_iframe/
"""
import asyncio
import voluptuous as vol
from homeassistant.const import (CONF_ICON, CONF_URL)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['frontend']
DOMAIN = 'panel_iframe'
CONF_TITLE = 'title'
CONF_RELATIVE_URL_ERROR_MSG = "Invalid relative URL. Absolute path required."
CONF_RELATIVE_URL_REGEX = r'\A/'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: {
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_URL): vol.Any(
vol.Match(
CONF_RELATIVE_URL_REGEX,
msg=CONF_RELATIVE_URL_ERROR_MSG),
cv.url),
}})}, extra=vol.ALLOW_EXTRA)
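# Example configuration.yaml entry (illustrative only; the slug, title, icon
# and URL below are made-up values, not defaults shipped with the component):
#
#   panel_iframe:
#     router:
#       title: 'Router'
#       icon: mdi:router-wireless
#       url: 'http://192.168.1.1'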
@asyncio.coroutine
def setup(hass, config):
"""Set up the iFrame frontend panels."""
for url_path, info in config[DOMAIN].items():
yield from hass.components.frontend.async_register_built_in_panel(
'iframe', info.get(CONF_TITLE), info.get(CONF_ICON),
url_path, {'url': info[CONF_URL]})
return True
| apache-2.0 | -8,715,024,929,982,392,000 | 27.545455 | 77 | 0.640924 | false |
yarikoptic/NiPy-OLD | nipy/neurospin/register/realign4d.py | 1 | 10220 | from routines import cspline_transform, cspline_sample4d, slice_time
from transform import Affine, apply_affine, BRAIN_RADIUS_MM
import numpy as np
from scipy import optimize
DEFAULT_SPEEDUP = 4
DEFAULT_OPTIMIZER = 'powell'
DEFAULT_WITHIN_LOOPS = 2
DEFAULT_BETWEEN_LOOPS = 5
def grid_coords(xyz, affine, from_world, to_world):
Tv = np.dot(from_world, np.dot(affine, to_world))
XYZ = apply_affine(Tv, xyz)
return XYZ[0,:], XYZ[1,:], XYZ[2,:]
class Image4d(object):
"""
Class to represent a sequence of 3d scans acquired on a slice-by-slice basis.
"""
def __init__(self, array, to_world, tr, tr_slices=None, start=0.0,
slice_order='ascending', interleaved=False, slice_axis=2):
"""
Configure fMRI acquisition time parameters.
tr : inter-scan repetition time, i.e. the time elapsed between two consecutive scans
tr_slices : inter-slice repetition time, same as tr for slices
start : starting acquisition time respective to the implicit time origin
slice_order : string or array
"""
self.array = array
self.to_world = to_world
nslices = array.shape[slice_axis]
        # Default slice repetition time (no gap between consecutive slices)
        if tr_slices is None:
tr_slices = tr/float(nslices)
# Set slice order
if isinstance(slice_order, str):
if not interleaved:
aux = range(nslices)
else:
p = nslices/2
aux = []
for i in range(p):
aux.extend([i,p+i])
if nslices%2:
aux.append(nslices-1)
if slice_order == 'descending':
aux.reverse()
slice_order = aux
# Set timing values
self.nslices = nslices
self.tr = float(tr)
self.tr_slices = float(tr_slices)
self.start = float(start)
self.slice_order = np.asarray(slice_order)
self.interleaved = bool(interleaved)
## assume that the world referential is 'scanner' as defined
## by the nifti norm
self.reversed_slices = to_world[slice_axis][slice_axis]<0
def z_to_slice(self, z):
"""
Account for the fact that slices may be stored in reverse
order wrt the scanner coordinate system convention (slice 0 ==
bottom of the head)
"""
if self.reversed_slices:
return self.nslices - 1 - z
else:
return z
def to_time(self, z, t):
"""
t = to_time(zv, tv)
zv, tv are grid coordinates; t is an actual time value.
"""
return(self.start + self.tr*t + slice_time(self.z_to_slice(z), self.tr_slices, self.slice_order))
def from_time(self, z, t):
"""
tv = from_time(zv, t)
zv, tv are grid coordinates; t is an actual time value.
"""
return((t - self.start - slice_time(self.z_to_slice(z), self.tr_slices, self.slice_order))/self.tr)
def get_data(self):
return self.array
def get_affine(self):
return self.to_world
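# Illustrative sketch (not part of the original module): constructing an
# Image4d from an in-memory array. The shape, affine and timing values are
# assumptions chosen only to show the expected call signature.
#
#     import numpy as np
#     data = np.zeros((64, 64, 30, 100))   # x, y, z (30 slices), 100 scans
#     affine = np.eye(4)                   # voxel-to-world transform
#     im4d = Image4d(data, affine, tr=2.5, slice_order='ascending')
#     t_acq = im4d.to_time(0, 0)           # acquisition time of slice 0 in scan 0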
class Realign4d(object):
def __init__(self,
im4d,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER,
transforms=None):
self.optimizer = optimizer
dims = im4d.array.shape
self.dims = dims
self.nscans = dims[3]
# Define mask
speedup = max(1, int(speedup))
xyz = np.mgrid[0:dims[0]:speedup, 0:dims[1]:speedup, 0:dims[2]:speedup]
self.xyz = xyz.reshape(3, np.prod(xyz.shape[1::]))
masksize = self.xyz.shape[1]
self.data = np.zeros([masksize, self.nscans], dtype='double')
# Initialize space/time transformation parameters
self.to_world = im4d.to_world
self.from_world = np.linalg.inv(self.to_world)
        if transforms is None:
self.transforms = [Affine('rigid', radius=BRAIN_RADIUS_MM) for scan in range(self.nscans)]
else:
self.transforms = transforms
self.from_time = im4d.from_time
self.timestamps = im4d.tr*np.array(range(self.nscans))
# Compute the 4d cubic spline transform
self.cbspline = cspline_transform(im4d.array)
def resample_inmask(self, t):
X, Y, Z = grid_coords(self.xyz, self.transforms[t], self.from_world, self.to_world)
T = self.from_time(Z, self.timestamps[t])
cspline_sample4d(self.data[:,t], self.cbspline, X, Y, Z, T)
def resample_all_inmask(self):
for t in range(self.nscans):
print('Resampling scan %d/%d' % (t+1, self.nscans))
self.resample_inmask(t)
def init_motion_detection(self, t):
"""
The idea is to compute the global variance using the following
decomposition:
V = (n-1)/n V1 + (n-1)/n^2 (x1-m1)^2
= alpha + beta d2,
with alpha=(n-1)/n V1, beta = (n-1)/n^2, d2 = (x1-m1)^2.
Only the second term is variable when one image moves while
all other images are fixed.
"""
self.resample_inmask(t)
fixed = range(self.nscans)
fixed.remove(t)
aux = self.data[:, fixed]
self.m1 = aux.mean(1)
self.d2 = np.zeros(np.shape(self.m1))
self.alpha = ((self.nscans-1.0)/self.nscans)*aux.var(1).mean()
self.beta = (self.nscans-1.0)/self.nscans**2
def msid(self, t):
"""
Mean square intensity difference
"""
self.resample_inmask(t)
self.d2[:] = self.data[:,t]
self.d2 -= self.m1
self.d2 **= 2
return self.d2.mean()
def variance(self, t):
return self.alpha + self.beta*self.msid(t)
def safe_variance(self, t):
"""
No need to invoke self.init_motion_detection.
"""
self.resample_inmask(t)
self.m = self.data.mean(1)
self.m2 = (self.data**2).mean(1)
self.m **= 2
self.m2 -= self.m
return self.m2.mean()
def correct_motion(self):
optimizer = self.optimizer
def callback(pc):
self.transforms[t].from_param(pc)
print(self.transforms[t])
if optimizer=='simplex':
fmin = optimize.fmin
elif optimizer=='powell':
fmin = optimize.fmin_powell
elif optimizer=='conjugate_gradient':
fmin = optimize.fmin_cg
else:
raise ValueError('Unrecognized optimizer')
# Resample data according to the current space/time transformation
self.resample_all_inmask()
# Optimize motion parameters
for t in range(self.nscans):
print('Correcting motion of scan %d/%d...' % (t+1, self.nscans))
def loss(pc):
self.transforms[t].from_param(pc)
return self.msid(t)
self.init_motion_detection(t)
pc0 = self.transforms[t].to_param()
pc = fmin(loss, pc0, callback=callback)
self.transforms[t].from_param(pc)
def resample(self):
print('Gridding...')
dims = self.dims
XYZ = np.mgrid[0:dims[0], 0:dims[1], 0:dims[2]]
XYZ = XYZ.reshape(3, np.prod(XYZ.shape[1::]))
res = np.zeros(dims)
for t in range(self.nscans):
print('Fully resampling scan %d/%d' % (t+1, self.nscans))
X, Y, Z = grid_coords(XYZ, self.transforms[t], self.from_world, self.to_world)
T = self.from_time(Z, self.timestamps[t])
cspline_sample4d(res[:,:,:,t], self.cbspline, X, Y, Z, T)
return res
def _resample4d(im4d, transforms=None):
"""
corr_im4d_array = _resample4d(im4d, transforms=None)
"""
r = Realign4d(im4d, transforms=transforms)
return r.resample()
def _realign4d(im4d,
loops=DEFAULT_WITHIN_LOOPS,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER):
"""
transforms = _realign4d(im4d, loops=2, speedup=4, optimizer='powell')
Parameters
----------
im4d : Image4d instance
"""
r = Realign4d(im4d, speedup=speedup, optimizer=optimizer)
for loop in range(loops):
r.correct_motion()
return r.transforms
def realign4d(runs,
within_loops=DEFAULT_WITHIN_LOOPS,
between_loops=DEFAULT_BETWEEN_LOOPS,
speedup=DEFAULT_SPEEDUP,
optimizer=DEFAULT_OPTIMIZER,
align_runs=True):
"""
    transforms = realign4d(runs, within_loops=2, between_loops=5, speedup=4, optimizer='powell')
Parameters
----------
runs : list of Image4d objects
Returns
-------
transforms : list
nested list of rigid transformations
"""
# Single-session case
if not isinstance(runs, list) and not isinstance(runs, tuple):
runs = [runs]
nruns = len(runs)
# Correct motion and slice timing in each sequence separately
transfo_runs = [_realign4d(run, loops=within_loops, speedup=speedup, optimizer=optimizer) for run in runs]
if nruns==1:
return transfo_runs[0]
if align_runs==False:
return transfo_runs
# Correct between-session motion using the mean image of each corrected run
corr_runs = [_resample4d(runs[i], transforms=transfo_runs[i]) for i in range(nruns)]
aux = np.rollaxis(np.asarray([corr_run.mean(3) for corr_run in corr_runs]), 0, 4)
## Fake time series with zero inter-slice time
## FIXME: check that all runs have the same to-world transform
mean_img = Image4d(aux, to_world=runs[0].to_world, tr=1.0, tr_slices=0.0)
transfo_mean = _realign4d(mean_img, loops=between_loops, speedup=speedup, optimizer=optimizer)
corr_mean = _resample4d(mean_img, transforms=transfo_mean)
# Compose transformations for each run
for i in range(nruns):
run_to_world = transfo_mean[i]
transforms = [run_to_world*to_run for to_run in transfo_runs[i]]
transfo_runs[i] = transforms
return transfo_runs
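# Illustrative usage sketch (not part of the original module): a typical
# motion plus slice-timing correction pass over a single run. The Image4d
# construction arguments are assumptions; see the Image4d class above.
#
#     im4d = Image4d(data, affine, tr=2.5)
#     transforms = realign4d(im4d, within_loops=2, between_loops=5)
#     corrected_array = _resample4d(im4d, transforms=transforms)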
| bsd-3-clause | -338,591,949,903,743,800 | 32.074434 | 110 | 0.57593 | false |
brene/slide-transition-detector | mediaoutput.py | 1 | 6077 | from abc import ABCMeta, abstractmethod
import datetime
import cv2
import math
import os
import errno
class MediaWriter(object):
"""
Abstract class for all media outputs. Forcing each inheritance
to have a write class.
"""
__metaclass__ = ABCMeta
@abstractmethod
def write(self, content, *args):
"""
Write method to write media to disk
        :param content: the media content to be written
:param args: additional arguments that may be helpful
"""
pass
class NullWriter(MediaWriter):
def write(self, content, *args):
pass
class ImageWriter(MediaWriter):
"""
The ImageWriter will write an image to disk.
"""
__metaclass__ = ABCMeta
def __init__(self, prefix, file_format):
"""
Default initializer
        :param prefix: the filename prefix; a counter will be added
        after this string and incremented after each write to disk
:param file_format: the file format for the images.
"""
if not file_format.startswith('.'):
file_format = '.' + file_format
if prefix is not None:
setup_dirs(prefix)
self.name = prefix + file_format
def write(self, img, *args):
"""
Writes the given image to the location specified through the
initializer
:param img: the image that will be written to disk
"""
cv2.imwrite(self.name % self.next_name(args), img)
@abstractmethod
def next_name(self, *args):
"""
This abstract method returns the object that should be inserted
into the filename
:param args: the args, that is passed to write_image
:return: the object that will be inserted into the filename
"""
class CustomImageWriter(ImageWriter):
"""
    Image writer that uses a custom name, which is taken as the first
    argument in *args of the write method.
"""
def __init__(self, prefix=None, file_format='.jpg'):
"""
Default initializer
:param prefix: the file location and file name prefix
:param file_format: the file format e.g. .jpg, .png
"""
super(CustomImageWriter, self).__init__(prefix + '%s', file_format)
def next_name(self, *args):
return args[0]
class IncrementalImageWriter(ImageWriter):
"""
The IncrementalImageWriter will write an image to disk and append a
number to the file name. This number will be auto-incremented by the
specified step size after each write.
"""
def __init__(self, prefix=None, file_format='.jpg', start=0, step=1):
"""
Default initializer
:param prefix: the file location and file name
:param file_format: the file format e.g. .jpg, .png
:param start: the starting number for the incremental count
:param step: the step by which the count should increment
"""
self.count = start - step
self.step = step
if prefix is not None:
prefix += '%d'
super(IncrementalImageWriter, self).__init__(prefix, file_format)
def next_name(self, *args):
self.count += self.step
return self.count
class TimestampImageWriter(ImageWriter):
"""
    TimestampImageWriter is an ImageWriter that adds the timestamp of when
the image was first shown in the original stream
"""
def __init__(self, fps, prefix=None, file_format='.jpg'):
"""
Default initializer
:param fps: The number of frames per second in the original stream
:param prefix: the prefix of the path to the output location
:param file_format: the file format of the output image
"""
self.fps = fps
if prefix is not None:
prefix += '%s'
super(TimestampImageWriter, self).__init__(prefix, file_format)
def next_name(self, args):
current_frame = args[0]
seconds = current_frame / self.fps
milliseconds = seconds - math.floor(seconds)
if milliseconds == 0:
milliseconds = '000'
else:
milliseconds = str(int(milliseconds * (10 ** 3)))
return str(datetime.timedelta(seconds=int(seconds))) + '.' + milliseconds.zfill(3)
class TimetableWriter(MediaWriter):
"""
The Timetable Writer outputs each slide iteratively using
the IncrementalImageWriter. Additionally it outputs a ".txt"
    document containing the slide names and their appearances.
"""
def __init__(self, output_dir, timetable_loc, file_format):
"""
Default initializer
:param output_dir: the output directory for the sorted slides
        :param timetable_loc: where the timetable file should be stored
        :param file_format: the file format for the exported slide images
        """
setup_dirs(timetable_loc)
self.timetable = open(timetable_loc, 'w')
self.img_writer = IncrementalImageWriter(prefix=output_dir, start=1, file_format=file_format)
self.txt_writer = TextWriter(self.timetable)
def write(self, slides, *args):
i = 1
for slide in slides:
if slide.marked:
continue
self.img_writer.write(slide.img)
appearances = slide.time
for com in slide.times:
appearances += " " + com
self.txt_writer.write("Slide %d: %s\n" % (i, appearances))
i += 1
def close(self):
self.timetable.close()
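# Illustrative usage sketch (not part of the original file): wiring the writer
# into a detection pipeline. The slide objects are assumed to expose .img,
# .time, .times and .marked as used in write() above; paths are placeholders.
#
#     writer = TimetableWriter('out/slides/', 'out/timetable.txt', '.jpg')
#     writer.write(detected_slides)
#     writer.close()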
class TextWriter(MediaWriter):
def __init__(self, output_file):
self.output_file = output_file
def write(self, content, *args):
self.output_file.write(content)
def setup_dirs(path):
"""
Takes a path and makes sure that directories to the path
gets created and is writable.
    :param path: the path to the file whose parent directories are needed
"""
path = os.path.dirname(path)
if path == '':
return
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
| mit | 3,480,623,246,772,520,000 | 30.005102 | 101 | 0.61165 | false |
vesellov/bitdust.devel | transport/http/http_node.py | 1 | 13830 | #!/usr/bin/python
# http_node.py
#
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (http_node.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
.. module:: http_node.
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
#------------------------------------------------------------------------------
_Debug = True
_DebugLevel = 8
#------------------------------------------------------------------------------
import os
import sys
import time
import base64
#------------------------------------------------------------------------------
try:
from twisted.internet import reactor
except:
sys.exit('Error initializing twisted.internet.reactor in tcp_node.py')
from twisted.internet import protocol
from twisted.internet.defer import Deferred, succeed
from twisted.internet.error import CannotListenError
from twisted.web.client import HTTPClientFactory
from twisted.web import server, resource
#------------------------------------------------------------------------------
if __name__ == '__main__':
import os.path as _p
sys.path.insert(0, _p.abspath(_p.join(_p.dirname(_p.abspath(sys.argv[0])), '..')))
sys.path.insert(0, _p.abspath(_p.join(_p.dirname(_p.abspath(sys.argv[0])), '..', '..')))
#------------------------------------------------------------------------------
from logs import lg
from lib import nameurl
from lib import net_misc
from lib import strng
from system import bpio
from system import tmpfile
from contacts import contactsdb
from contacts import identitycache
from userid import my_id
from main import settings
#------------------------------------------------------------------------------
_Outbox = {}
_Contacts = {}
_Receiver = None
_ReceivingLoop = None
_ServerListener = None
_LastPingTimeDict = {}
_PingDelayDict = {}
_ConnectionsDict = {}
_CurrentDelay = 5
#------------------------------------------------------------------------------
# def init(receiving=True, sending_port=None):
# """
# """
# lg.out(4, 'http_node.init')
# contactsdb.AddContactsChangedCallback(on_contacts_changed)
# if sending_port:
# start_sending(sending_port)
# if receiving:
# start_receiving()
# def shutdown():
# """
# """
# contactsdb.RemoveContactsChangedCallback(on_contacts_changed)
# stop_sending()
# stop_receiving()
#------------------------------------------------------------------------------
def start_sending(port):
global _ServerListener
if _ServerListener is not None:
lg.out(8, 'http_node.start_http_server is already started')
return _ServerListener
lg.out(6, 'http_node.start_http_server going to listen on port ' + str(port))
site = server.Site(SenderServer())
try:
_ServerListener = reactor.listenTCP(int(port), site)
except:
lg.exc()
_ServerListener = None
return _ServerListener
def stop_sending():
global _ServerListener
lg.out(6, 'http_node.stop_sending')
if _ServerListener is None:
lg.out(8, 'http_node.stop_sending _ServerListener is None')
d = Deferred()
d.callback('')
return d
d = _ServerListener.stopListening()
_ServerListener = None
return d
#------------------------------------------------------------------------------
def send_file(idurl, filename):
lg.out(12, 'http_node.send to %s %s' % (idurl, filename))
global _Outbox
if idurl not in _Outbox:
_Outbox[idurl] = []
_Outbox[idurl].append(filename)
    # we want to keep only the 10 most recent files.
if len(_Outbox[idurl]) > 10:
lostedfilename = _Outbox[idurl].pop(0)
lg.warn('losted: "%s"' % lostedfilename)
# transport_control.sendStatusReport(
# 'unknown',
# lostedfilename,
# 'failed',
# 'http',)
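# Illustrative usage sketch (not part of the original module): how the sending
# side is typically wired together. The port, idurl and filename below are
# placeholder values.
#
#     start_sending(9122)        # HTTP server that remote nodes will poll
#     send_file('http://p2p-id.example/alice.xml', '/tmp/outgoing.packet')
#     # the file stays queued in _Outbox until the peer polls SenderServer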
#------------------------------------------------------------------------------
class SenderServer(resource.Resource):
isLeaf = True
def render_POST(self, request):
global _Outbox
idurl = request.getHeader('idurl')
if idurl is None:
return ''
lg.out(14, 'http_node.SenderServer.render connection from ' + idurl)
if idurl not in list(_Outbox.keys()):
return ''
r = ''
for filename in _Outbox[idurl]:
if not os.path.isfile(filename):
continue
if not os.access(filename, os.R_OK):
continue
src = bpio.ReadBinaryFile(filename)
if src == '':
continue
src64 = base64.b64encode(src)
r += src64 + '\n'
lg.out(12, 'http_node.SenderServer.render sent %s to %s' % (filename, idurl))
#TODO request.getPeer()
# transport_control.sendStatusReport(
# request.getClient(),
# filename,
# 'finished',
# 'http',)
_Outbox.pop(idurl, None)
return r
#------------------------------------------------------------------------------
class TransportHTTPClientFactory(HTTPClientFactory):
pass
class TransportHTTPProxyClientFactory(HTTPClientFactory):
def setURL(self, url):
HTTPClientFactory.setURL(self, url)
self.path = url
#------------------------------------------------------------------------------
class Receiver(object):
def loop(self):
global _ReceivingLoop
global _Contacts
global _ToIncreaseDelay
global _LastPingTimeDict
global _PingDelayDict
global _ConnectionsDict
global _CurrentDelay
lg.out(6, 'http_node.Receiver.loop')
# _CurrentDelay = settings.getHTTPDelay()
for idurl, hostport in _Contacts.items():
if idurl in _ConnectionsDict:
continue
lasttm = _LastPingTimeDict.get(idurl, 0)
delay = _PingDelayDict.get(idurl, _CurrentDelay)
dt = time.time() - lasttm
if dt < delay:
continue
_ConnectionsDict[idurl] = self.do_ping(idurl, hostport[0], hostport[1])
_LastPingTimeDict[idurl] = time.time()
_ReceivingLoop = reactor.callLater(1, self.loop)
return _ReceivingLoop
def on_ping_success(self, src, idurl, host, port, conn):
global _LastPingTimeDict
global _ConnectionsDict
if len(src) == 0:
increase_receiving_delay(idurl)
else:
parts = src.splitlines()
lg.out(14, 'http_node.receive.success %d bytes in %d parts from %s (%s:%s)' % (len(src), len(parts), idurl, host, port))
for part64 in parts:
try:
part = base64.b64decode(part64.strip())
except:
lg.out(14, 'http_node.receive.success ERROR in base64.b64decode()')
decrease_receiving_delay(idurl)
continue
fd, filename = tmpfile.make("http-in", extension='.http')
os.write(fd, part)
os.close(fd)
decrease_receiving_delay(idurl)
# transport_control.receiveStatusReport(
# filename,
# 'finished',
# 'http',
# host+':'+port,)
# transport_control.log('http', 'finish connection with %s:%s ' % (host, port))
conn.disconnect()
# TODO: keep it opened!
_ConnectionsDict.pop(idurl, None)
def on_ping_failed(self, x, idurl, host, port, conn):
global _LastPingTimeDict
global _ConnectionsDict
increase_receiving_delay(idurl)
conn.disconnect()
_ConnectionsDict.pop(idurl, None)
def do_ping(self, idurl, host, port):
lg.out(14, 'http_node.receive.ping %s (%s:%s)' % (idurl, host, port))
url = b'http://' + host + b':' + strng.to_bin(str(port))
if net_misc.proxy_is_on():
f = TransportHTTPProxyClientFactory(url, method='POST', headers={
'User-Agent': 'DataHaven.NET transport_http', 'idurl': my_id.getLocalID(), } )
conn = reactor.connectTCP(net_misc.get_proxy_host(), int(net_misc.get_proxy_port()), f)
else:
f = TransportHTTPClientFactory(url, method='POST', headers={
'User-Agent': 'DataHaven.NET transport_http', 'idurl': my_id.getLocalID(), } )
conn = reactor.connectTCP(host, int(port), f)
f.deferred.addCallback(self.on_ping_success, idurl, host, port, conn)
f.deferred.addErrback(self.on_ping_failed, idurl, host, port, conn)
return conn
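# Illustrative usage sketch (not part of the original module): the receiving
# side only needs the polling loop started; the peers to poll come from the
# contacts database via do_update_contacts().
#
#     start_receiving()          # Receiver.loop() re-schedules itself every second
#     reactor.run()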
#------------------------------------------------------------------------------
def decrease_receiving_delay(idurl):
global _PingDelayDict
global _CurrentDelay
lg.out(14, 'http_node.decrease_receiving_delay ' + idurl)
_PingDelayDict[idurl] = _CurrentDelay
def increase_receiving_delay(idurl):
global _PingDelayDict
global _CurrentDelay
if idurl not in _PingDelayDict:
_PingDelayDict[idurl] = _CurrentDelay
d = _PingDelayDict[idurl]
if d < settings.DefaultSendTimeOutHTTP() / 2:
lg.out(14, 'http_node.increase_receiving_delay %s for %s' % (str(d), idurl))
_PingDelayDict[idurl] *= 2
#------------------------------------------------------------------------------
def start_receiving():
global _Receiver
if _Receiver is not None:
lg.warn('already started')
return _Receiver
_Receiver = Receiver()
_Receiver.loop()
return _Receiver
def stop_receiving():
global _ReceivingLoop
lg.out(6, 'http_node.stop_receiving')
if _ReceivingLoop is None:
lg.out(8, 'http_node.stop_receiving _ReceivingLoop is None')
return
if _ReceivingLoop.called:
lg.out(8, 'http_node.stop_receiving _ReceivingLoop is already called')
return
_ReceivingLoop.cancel()
del _ReceivingLoop
_ReceivingLoop = None
#------------------------------------------------------------------------------
def push_contact(idurl):
global _Contacts
global _PingDelayDict
global _CurrentDelay
ident = identitycache.FromCache(idurl)
if ident is None:
lg.err('"%s" not in the cache' % idurl)
return None
http_contact = ident.getProtoContact('http')
if http_contact is None:
if _Debug:
lg.out(_DebugLevel * 2, 'http_node.add_contact SKIP "%s" : no http contacts found in identity' % idurl)
return None
_, host, port, _ = nameurl.UrlParse(http_contact)
new_item = False
    if idurl not in _Contacts:
new_item = True
_Contacts[idurl] = (host, port)
_PingDelayDict[idurl] = _CurrentDelay
if new_item:
if _Debug:
lg.out(_DebugLevel, 'http_node.add_contact ADDED "%s" on %s:%s' % (idurl, host, port))
else:
if _Debug:
lg.out(_DebugLevel, 'http_node.add_contact UPDATED "%s" on %s:%s' % (idurl, host, port))
return idurl
#------------------------------------------------------------------------------
def do_update_contacts():
global _Contacts
if _Debug:
lg.out(_DebugLevel, 'http_node.update_contacts')
_Contacts.clear()
for idurl in contactsdb.contacts(include_all=True):
lg.out(10, 'http_node.update_contacts want ' + idurl)
if idurl == my_id.getLocalID():
continue
latest_identity = identitycache.GetLatest(idurl)
if isinstance(latest_identity, Deferred):
latest_identity.addCallback(lambda src: push_contact(idurl))
latest_identity.addErrback(lambda err: lg.out(
_DebugLevel, 'http_node.update_contacts "%s" failed to cache' % idurl) if _Debug else None)
else:
push_contact(idurl)
#------------------------------------------------------------------------------
def on_contacts_changed(oldlist, newlist):
do_update_contacts()
#------------------------------------------------------------------------------
def usage():
print('''usage:
http_node.py send [server_port] [to idurl] [filename]
http_node.py receive
''')
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
from twisted.internet.defer import setDebugging
setDebugging(True)
# from twisted.python import log as twisted_log
# twisted_log.startLogging(sys.stdout)
lg.set_debug_level(20)
settings.init()
settings.update_proxy_settings()
if sys.argv.count('receive'):
start_receiving()
# global _Contacts
# _Contacts['http://p2p-id.ru/veselin.xml'] = ('127.0.0.1', 9122)
elif sys.argv.count('send'):
start_sending(port=int(sys.argv[2]))
send_file(sys.argv[3], sys.argv[4])
else:
usage()
return
reactor.run()
#------------------------------------------------------------------------------
if __name__ == '__main__':
main()
| agpl-3.0 | -5,036,903,023,932,048,000 | 31.088167 | 132 | 0.542878 | false |
SteveDiamond/cvxpy | cvxpy/atoms/min.py | 2 | 2912 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.atom import Atom
from cvxpy.atoms.axis_atom import AxisAtom
import numpy as np
class min(AxisAtom):
""":math:`\\min{i,j}\\{X_{i,j}\\}`.
"""
def __init__(self, x, axis=None, keepdims=False):
super(min, self).__init__(x, axis=axis, keepdims=keepdims)
@Atom.numpy_numeric
def numeric(self, values):
"""Returns the smallest entry in x.
"""
return values[0].min(axis=self.axis, keepdims=self.keepdims)
def _grad(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
return self._axis_grad(values)
def _column_grad(self, value):
"""Gives the (sub/super)gradient of the atom w.r.t. a column argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
value: A numeric value for a column.
Returns:
A NumPy ndarray or None.
"""
        # Grad: 1 at an index of a smallest entry.
value = np.array(value).ravel(order='F')
idx = np.argmin(value)
D = np.zeros((value.size, 1))
D[idx] = 1
return D
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
# Same as argument.
return (self.args[0].is_nonneg(), self.args[0].is_nonpos())
def is_atom_convex(self):
"""Is the atom convex?
"""
return False
def is_atom_concave(self):
"""Is the atom concave?
"""
return True
def is_atom_log_log_convex(self):
"""Is the atom log-log convex?
"""
return False
def is_atom_log_log_concave(self):
"""Is the atom log-log concave?
"""
return True
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return True
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return False
def is_pwl(self):
"""Is the atom piecewise linear?
"""
return self.args[0].is_pwl()
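# Illustrative usage sketch (not part of the original file): exercising the atom
# through the public cvxpy interface. The problem data are made-up placeholders.
#
#     import cvxpy as cp
#     x = cp.Variable(5)
#     prob = cp.Problem(cp.Maximize(cp.min(x)), [cp.sum(x) == 1, x >= 0])
#     prob.solve()               # maximizing the smallest entry gives x_i == 0.2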
| gpl-3.0 | -3,086,104,830,396,706,000 | 26.733333 | 78 | 0.603365 | false |
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/sympy/core/tests/test_numbers.py | 1 | 44922 | from __future__ import with_statement
import decimal
from sympy import (Rational, Symbol, Float, I, sqrt, oo, nan, pi, E, Integer,
S, factorial, Catalan, EulerGamma, GoldenRatio, cos, exp,
Number, zoo, log, Mul, Pow, Tuple)
from sympy.core.basic import _aresame
from sympy.core.power import integer_nthroot
from sympy.core.numbers import igcd, ilcm, igcdex, seterr, _intcache, mpf_norm
from sympy.mpmath import mpf
from sympy.utilities.pytest import XFAIL, slow, raises
from sympy import mpmath
def test_integers_cache():
python_int = 2**65 + 3175259
while python_int in _intcache or hash(python_int) in _intcache:
python_int += 1
sympy_int = Integer(python_int)
assert python_int in _intcache
assert hash(python_int) not in _intcache
assert sympy_int not in _intcache
sympy_int_int = Integer(sympy_int)
assert python_int in _intcache
assert hash(python_int) not in _intcache
assert sympy_int_int not in _intcache
sympy_hash_int = Integer(hash(python_int))
assert python_int in _intcache
assert hash(python_int) in _intcache
assert sympy_hash_int not in _intcache
def test_seterr():
seterr(divide=True)
raises(ValueError, lambda: S.Zero/S.Zero)
seterr(divide=False)
assert S.Zero / S.Zero == S.NaN
def test_mod():
x = Rational(1, 2)
y = Rational(3, 4)
z = Rational(5, 18043)
assert x % x == 0
assert x % y == 1/S(2)
assert x % z == 3/S(36086)
assert y % x == 1/S(4)
assert y % y == 0
assert y % z == 9/S(72172)
assert z % x == 5/S(18043)
assert z % y == 5/S(18043)
assert z % z == 0
a = Float(2.6)
assert (a % .2) == 0
assert (a % 2).round(15) == 0.6
assert (a % 0.5).round(15) == 0.1
# In these two tests, if the precision of m does
# not match the precision of the ans, then it is
# likely that the change made now gives an answer
# with degraded accuracy.
r = Rational(500, 41)
f = Float('.36', 3)
m = r % f
ans = Float(r % Rational(f), 3)
assert m == ans and m._prec == ans._prec
f = Float('8.36', 3)
m = f % r
ans = Float(Rational(f) % r, 3)
assert m == ans and m._prec == ans._prec
s = S.Zero
assert s % float(1) == S.Zero
# No rounding required since these numbers can be represented
# exactly.
assert Rational(3, 4) % Float(1.1) == 0.75
assert Float(1.5) % Rational(5, 4) == 0.25
assert Rational(5, 4).__rmod__(Float('1.5')) == 0.25
assert Float('1.5').__rmod__(Float('2.75')) == Float('1.25')
assert 2.75 % Float('1.5') == Float('1.25')
a = Integer(7)
b = Integer(4)
assert type(a % b) == Integer
assert a % b == Integer(3)
assert Integer(1) % Rational(2, 3) == Rational(1, 3)
assert Rational(7, 5) % Integer(1) == Rational(2, 5)
assert Integer(2) % 1.5 == 0.5
assert Integer(3).__rmod__(Integer(10)) == Integer(1)
assert Integer(10) % 4 == Integer(2)
assert 15 % Integer(4) == Integer(3)
def test_divmod():
assert divmod(S(12), S(8)) == Tuple(1, 4)
assert divmod(-S(12), S(8)) == Tuple(-2, 4)
assert divmod(S(0), S(1)) == Tuple(0, 0)
raises(ZeroDivisionError, lambda: divmod(S(0), S(0)))
raises(ZeroDivisionError, lambda: divmod(S(1), S(0)))
assert divmod(S(12), 8) == Tuple(1, 4)
assert divmod(12, S(8)) == Tuple(1, 4)
assert divmod(S("2"), S("3/2")) == Tuple(S("1"), S("1/2"))
assert divmod(S("3/2"), S("2")) == Tuple(S("0"), S("3/2"))
assert divmod(S("2"), S("3.5")) == Tuple(S("0"), S("2"))
assert divmod(S("3.5"), S("2")) == Tuple(S("1"), S("1.5"))
assert divmod(S("2"), S("1/3")) == Tuple(S("6"), S("0"))
assert divmod(S("1/3"), S("2")) == Tuple(S("0"), S("1/3"))
assert divmod(S("2"), S("0.1")) == Tuple(S("20"), S("0"))
assert divmod(S("0.1"), S("2")) == Tuple(S("0"), S("0.1"))
assert divmod(S("2"), 2) == Tuple(S("1"), S("0"))
assert divmod(2, S("2")) == Tuple(S("1"), S("0"))
assert divmod(S("2"), 1.5) == Tuple(S("1"), S("0.5"))
assert divmod(1.5, S("2")) == Tuple(S("0"), S("1.5"))
assert divmod(0.3, S("2")) == Tuple(S("0"), S("0.3"))
assert divmod(S("3/2"), S("3.5")) == Tuple(S("0"), S("3/2"))
assert divmod(S("3.5"), S("3/2")) == Tuple(S("2"), S("0.5"))
assert divmod(S("3/2"), S("1/3")) == Tuple(S("4"), Float("1/6"))
assert divmod(S("1/3"), S("3/2")) == Tuple(S("0"), S("1/3"))
assert divmod(S("3/2"), S("0.1")) == Tuple(S("15"), S("0"))
assert divmod(S("0.1"), S("3/2")) == Tuple(S("0"), S("0.1"))
assert divmod(S("3/2"), 2) == Tuple(S("0"), S("3/2"))
assert divmod(2, S("3/2")) == Tuple(S("1"), S("0.5"))
assert divmod(S("3/2"), 1.5) == Tuple(S("1"), S("0"))
assert divmod(1.5, S("3/2")) == Tuple(S("1"), S("0"))
assert divmod(S("3/2"), 0.3) == Tuple(S("5"), S("0"))
assert divmod(0.3, S("3/2")) == Tuple(S("0"), S("0.3"))
assert divmod(S("1/3"), S("3.5")) == Tuple(S("0"), S("1/3"))
assert divmod(S("3.5"), S("0.1")) == Tuple(S("35"), S("0"))
assert divmod(S("0.1"), S("3.5")) == Tuple(S("0"), S("0.1"))
assert divmod(S("3.5"), 2) == Tuple(S("1"), S("1.5"))
assert divmod(2, S("3.5")) == Tuple(S("0"), S("2"))
assert divmod(S("3.5"), 1.5) == Tuple(S("2"), S("0.5"))
assert divmod(1.5, S("3.5")) == Tuple(S("0"), S("1.5"))
assert divmod(0.3, S("3.5")) == Tuple(S("0"), S("0.3"))
assert divmod(S("0.1"), S("1/3")) == Tuple(S("0"), S("0.1"))
assert divmod(S("1/3"), 2) == Tuple(S("0"), S("1/3"))
assert divmod(2, S("1/3")) == Tuple(S("6"), S("0"))
assert divmod(S("1/3"), 1.5) == Tuple(S("0"), S("1/3"))
assert divmod(0.3, S("1/3")) == Tuple(S("0"), S("0.3"))
assert divmod(S("0.1"), 2) == Tuple(S("0"), S("0.1"))
assert divmod(2, S("0.1")) == Tuple(S("20"), S("0"))
assert divmod(S("0.1"), 1.5) == Tuple(S("0"), S("0.1"))
assert divmod(1.5, S("0.1")) == Tuple(S("15"), S("0"))
assert divmod(S("0.1"), 0.3) == Tuple(S("0"), S("0.1"))
assert str(divmod(S("2"), 0.3)) == '(6, 0.2)'
assert str(divmod(S("3.5"), S("1/3"))) == '(10, 0.166666666666667)'
assert str(divmod(S("3.5"), 0.3)) == '(11, 0.2)'
assert str(divmod(S("1/3"), S("0.1"))) == '(3, 0.0333333333333333)'
assert str(divmod(1.5, S("1/3"))) == '(4, 0.166666666666667)'
assert str(divmod(S("1/3"), 0.3)) == '(1, 0.0333333333333333)'
assert str(divmod(0.3, S("0.1"))) == '(2, 0.1)'
assert divmod(-3, S(2)) == (-2, 1)
assert divmod(S(-3), S(2)) == (-2, 1)
assert divmod(S(-3), 2) == (-2, 1)
def test_igcd():
assert igcd(0, 0) == 0
assert igcd(0, 1) == 1
assert igcd(1, 0) == 1
assert igcd(0, 7) == 7
assert igcd(7, 0) == 7
assert igcd(7, 1) == 1
assert igcd(1, 7) == 1
assert igcd(-1, 0) == 1
assert igcd(0, -1) == 1
assert igcd(-1, -1) == 1
assert igcd(-1, 7) == 1
assert igcd(7, -1) == 1
assert igcd(8, 2) == 2
assert igcd(4, 8) == 4
assert igcd(8, 16) == 8
assert igcd(7, -3) == 1
assert igcd(-7, 3) == 1
assert igcd(-7, -3) == 1
raises(ValueError, lambda: igcd(45.1, 30))
raises(ValueError, lambda: igcd(45, 30.1))
def test_ilcm():
assert ilcm(0, 0) == 0
assert ilcm(1, 0) == 0
assert ilcm(0, 1) == 0
assert ilcm(1, 1) == 1
assert ilcm(2, 1) == 2
assert ilcm(8, 2) == 8
assert ilcm(8, 6) == 24
assert ilcm(8, 7) == 56
raises(ValueError, lambda: ilcm(8.1, 7))
raises(ValueError, lambda: ilcm(8, 7.1))
def test_igcdex():
assert igcdex(2, 3) == (-1, 1, 1)
assert igcdex(10, 12) == (-1, 1, 2)
assert igcdex(100, 2004) == (-20, 1, 4)
def _strictly_equal(a, b):
return (a.p, a.q, type(a.p), type(a.q)) == \
(b.p, b.q, type(b.p), type(b.q))
def _test_rational_new(cls):
"""
Tests that are common between Integer and Rational.
"""
assert cls(0) is S.Zero
assert cls(1) is S.One
assert cls(-1) is S.NegativeOne
# These look odd, but are similar to int():
assert cls('1') is S.One
assert cls(u'-1') is S.NegativeOne
i = Integer(10)
assert _strictly_equal(i, cls('10'))
assert _strictly_equal(i, cls(u'10'))
assert _strictly_equal(i, cls(10L))
assert _strictly_equal(i, cls(i))
raises(TypeError, lambda: cls(Symbol('x')))
def test_Integer_new():
"""
Test for Integer constructor
"""
_test_rational_new(Integer)
assert _strictly_equal(Integer(0.9), S.Zero)
assert _strictly_equal(Integer(10.5), Integer(10))
raises(ValueError, lambda: Integer("10.5"))
assert Integer(Rational('1.' + '9'*20)) == 1
def test_Rational_new():
""""
Test for Rational constructor
"""
_test_rational_new(Rational)
n1 = Rational(1, 2)
assert n1 == Rational(Integer(1), 2)
assert n1 == Rational(Integer(1), Integer(2))
assert n1 == Rational(1, Integer(2))
assert n1 == Rational(Rational(1, 2))
assert 1 == Rational(n1, n1)
assert Rational(3, 2) == Rational(Rational(1, 2), Rational(1, 3))
assert Rational(3, 1) == Rational(1, Rational(1, 3))
n3_4 = Rational(3, 4)
assert Rational('3/4') == n3_4
assert -Rational('-3/4') == n3_4
assert Rational('.76').limit_denominator(4) == n3_4
assert Rational(19, 25).limit_denominator(4) == n3_4
assert Rational('19/25').limit_denominator(4) == n3_4
assert Rational(1.0, 3) == Rational(1, 3)
assert Rational(1, 3.0) == Rational(1, 3)
assert Rational(Float(0.5)) == Rational(1, 2)
assert Rational('1e2/1e-2') == Rational(10000)
assert Rational(-1, 0) == S.NegativeInfinity
assert Rational(1, 0) == S.Infinity
raises(TypeError, lambda: Rational('3**3'))
raises(TypeError, lambda: Rational('1/2 + 2/3'))
# handle fractions.Fraction instances
try:
import fractions
assert Rational(fractions.Fraction(1, 2)) == Rational(1, 2)
except ImportError:
pass
def test_Number_new():
""""
Test for Number constructor
"""
# Expected behavior on numbers and strings
assert Number(1) is S.One
assert Number(2).__class__ is Integer
assert Number(-622).__class__ is Integer
assert Number(5, 3).__class__ is Rational
assert Number(5.3).__class__ is Float
assert Number('1') is S.One
assert Number('2').__class__ is Integer
assert Number('-622').__class__ is Integer
assert Number('5/3').__class__ is Rational
assert Number('5.3').__class__ is Float
raises(ValueError, lambda: Number('cos'))
raises(TypeError, lambda: Number(cos))
a = Rational(3, 5)
assert Number(a) is a # Check idempotence on Numbers
def test_Rational_cmp():
n1 = Rational(1, 4)
n2 = Rational(1, 3)
n3 = Rational(2, 4)
n4 = Rational(2, -4)
n5 = Rational(0)
n6 = Rational(1)
n7 = Rational(3)
n8 = Rational(-3)
assert n8 < n5
assert n5 < n6
assert n6 < n7
assert n8 < n7
assert n7 > n8
assert (n1 + 1)**n2 < 2
assert ((n1 + n6)/n7) < 1
assert n4 < n3
assert n2 < n3
assert n1 < n2
assert n3 > n1
assert not n3 < n1
assert not (Rational(-1) > 0)
assert Rational(-1) < 0
def test_Float():
def eq(a, b):
t = Float("1.0E-15")
return (-t < a - b < t)
a = Float(2) ** Float(3)
assert eq(a.evalf(), Float(8))
assert eq((pi ** -1).evalf(), Float("0.31830988618379067"))
a = Float(2) ** Float(4)
assert eq(a.evalf(), Float(16))
assert (S(.3) == S(.5)) is False
x_str = Float((0, '13333333333333', -52, 53))
x2_str = Float((0, '26666666666666', -53, 53))
x_hex = Float((0, 0x13333333333333L, -52, 53))
x_dec = Float((0, 5404319552844595L, -52, 53))
x2_hex = Float((0, 0x13333333333333L*2, -53, 53))
assert x_str == x_hex == x_dec == x2_hex == Float(1.2)
# x2_str and 1.2 are superficially the same
assert str(x2_str) == str(Float(1.2))
# but are different at the mpf level
assert Float(1.2)._mpf_ == (0, 5404319552844595L, -52, 53)
assert x2_str._mpf_ == (0, 10808639105689190L, -53, 53)
assert Float((0, 0L, -123, -1)) == Float('nan')
assert Float((0, 0L, -456, -2)) == Float('inf') == Float('+inf')
assert Float((1, 0L, -789, -3)) == Float('-inf')
raises(ValueError, lambda: Float((0, 7, 1, 3), ''))
assert Float('+inf').is_bounded is False
assert Float('+inf').is_finite is False
assert Float('+inf').is_negative is False
assert Float('+inf').is_positive is True
assert Float('+inf').is_unbounded is True
assert Float('+inf').is_zero is False
assert Float('-inf').is_bounded is False
assert Float('-inf').is_finite is False
assert Float('-inf').is_negative is True
assert Float('-inf').is_positive is False
assert Float('-inf').is_unbounded is True
assert Float('-inf').is_zero is False
assert Float('0.0').is_bounded is True
assert Float('0.0').is_finite is False
assert Float('0.0').is_negative is False
assert Float('0.0').is_positive is False
assert Float('0.0').is_unbounded is False
assert Float('0.0').is_zero is True
# do not automatically evalf
def teq(a):
assert (a.evalf() == a) is False
assert (a.evalf() != a) is True
assert (a == a.evalf()) is False
assert (a != a.evalf()) is True
teq(pi)
teq(2*pi)
teq(cos(0.1, evaluate=False))
i = 12345678901234567890
assert _aresame(Float(12, ''), Float('12', ''))
assert _aresame(Float(Integer(i), ''), Float(i, ''))
assert _aresame(Float(i, ''), Float(str(i), 20))
assert not _aresame(Float(str(i)), Float(i, ''))
# inexact floats (repeating binary = denom not multiple of 2)
# cannot have precision greater than 15
assert Float(.125, 22) == .125
assert Float(2.0, 22) == 2
assert float(Float('.12500000000000001', '')) == .125
raises(ValueError, lambda: Float(.12500000000000001, ''))
# allow spaces
Float('123 456.123 456') == Float('123456.123456')
Integer('123 456') == Integer('123456')
Rational('123 456.123 456') == Rational('123456.123456')
assert Float(' .3e2') == Float('0.3e2')
# allow auto precision detection
assert Float('.1', '') == Float(.1, 1)
assert Float('.125', '') == Float(.125, 3)
assert Float('.100', '') == Float(.1, 3)
assert Float('2.0', '') == Float('2', 2)
raises(ValueError, lambda: Float("12.3d-4", ""))
raises(ValueError, lambda: Float(12.3, ""))
raises(ValueError, lambda: Float('.'))
raises(ValueError, lambda: Float('-.'))
zero = Float('0.0')
assert Float('-0') == zero
assert Float('.0') == zero
assert Float('-.0') == zero
assert Float('-0.0') == zero
assert Float(0.0) == zero
assert Float(0) == zero
assert Float(0, '') == Float('0', '')
assert Float(1) == Float(1.0)
assert Float(S.Zero) == zero
assert Float(S.One) == Float(1.0)
assert Float(decimal.Decimal('0.1'), 3) == Float('.1', 3)
def test_Float_eval():
a = Float(3.2)
assert (a**2).is_Float
def test_Float_issue_2107():
a = Float(0.1, 10)
b = Float("0.1", 10)
assert a - a == 0
assert a + (-a) == 0
assert S.Zero + a - a == 0
assert S.Zero + a + (-a) == 0
assert b - b == 0
assert b + (-b) == 0
assert S.Zero + b - b == 0
assert S.Zero + b + (-b) == 0
def test_Infinity():
assert oo != 1
assert 1*oo == oo
assert 1 != oo
assert oo != -oo
assert oo != Symbol("x")**3
assert oo + 1 == oo
assert 2 + oo == oo
assert 3*oo + 2 == oo
assert S.Half**oo == 0
assert S.Half**(-oo) == oo
assert -oo*3 == -oo
assert oo + oo == oo
assert -oo + oo*(-5) == -oo
assert 1/oo == 0
assert 1/(-oo) == 0
assert 8/oo == 0
assert oo % 2 == nan
assert 2 % oo == nan
assert oo/oo == nan
assert oo/-oo == nan
assert -oo/oo == nan
assert -oo/-oo == nan
assert oo - oo == nan
assert oo - -oo == oo
assert -oo - oo == -oo
assert -oo - -oo == nan
assert oo + -oo == nan
assert -oo + oo == nan
assert oo + oo == oo
assert -oo + oo == nan
assert oo + -oo == nan
assert -oo + -oo == -oo
assert oo*oo == oo
assert -oo*oo == -oo
assert oo*-oo == -oo
assert -oo*-oo == oo
assert oo/0 == oo
assert -oo/0 == -oo
assert 0/oo == 0
assert 0/-oo == 0
assert oo*0 == nan
assert -oo*0 == nan
assert 0*oo == nan
assert 0*-oo == nan
assert oo + 0 == oo
assert -oo + 0 == -oo
assert 0 + oo == oo
assert 0 + -oo == -oo
assert oo - 0 == oo
assert -oo - 0 == -oo
assert 0 - oo == -oo
assert 0 - -oo == oo
assert oo/2 == oo
assert -oo/2 == -oo
assert oo/-2 == -oo
assert -oo/-2 == oo
assert oo*2 == oo
assert -oo*2 == -oo
assert oo*-2 == -oo
assert 2/oo == 0
assert 2/-oo == 0
assert -2/oo == 0
assert -2/-oo == 0
assert 2*oo == oo
assert 2*-oo == -oo
assert -2*oo == -oo
assert -2*-oo == oo
assert 2 + oo == oo
assert 2 - oo == -oo
assert -2 + oo == oo
assert -2 - oo == -oo
assert 2 + -oo == -oo
assert 2 - -oo == oo
assert -2 + -oo == -oo
assert -2 - -oo == oo
assert S(2) + oo == oo
assert S(2) - oo == -oo
assert oo/I == -oo*I
assert -oo/I == oo*I
assert oo*float(1) == Float('inf') and (oo*float(1)).is_Float
assert -oo*float(1) == Float('-inf') and (-oo*float(1)).is_Float
assert oo/float(1) == Float('inf') and (oo/float(1)).is_Float
assert -oo/float(1) == Float('-inf') and (-oo/float(1)).is_Float
assert oo*float(-1) == Float('-inf') and (oo*float(-1)).is_Float
assert -oo*float(-1) == Float('inf') and (-oo*float(-1)).is_Float
assert oo/float(-1) == Float('-inf') and (oo/float(-1)).is_Float
assert -oo/float(-1) == Float('inf') and (-oo/float(-1)).is_Float
assert oo + float(1) == Float('inf') and (oo + float(1)).is_Float
assert -oo + float(1) == Float('-inf') and (-oo + float(1)).is_Float
assert oo - float(1) == Float('inf') and (oo - float(1)).is_Float
assert -oo - float(1) == Float('-inf') and (-oo - float(1)).is_Float
assert float(1)*oo == Float('inf') and (float(1)*oo).is_Float
assert float(1)*-oo == Float('-inf') and (float(1)*-oo).is_Float
assert float(1)/oo == 0
assert float(1)/-oo == 0
assert float(-1)*oo == Float('-inf') and (float(-1)*oo).is_Float
assert float(-1)*-oo == Float('inf') and (float(-1)*-oo).is_Float
assert float(-1)/oo == 0
assert float(-1)/-oo == 0
assert float(1) + oo == Float('inf')
assert float(1) + -oo == Float('-inf')
assert float(1) - oo == Float('-inf')
assert float(1) - -oo == Float('inf')
assert Float('nan') == nan
assert nan*1.0 == nan
assert -1.0*nan == nan
assert nan*oo == nan
assert nan*-oo == nan
assert nan/oo == nan
assert nan/-oo == nan
assert nan + oo == nan
assert nan + -oo == nan
assert nan - oo == nan
assert nan - -oo == nan
assert -oo * S.Zero == nan
assert oo*nan == nan
assert -oo*nan == nan
assert oo/nan == nan
assert -oo/nan == nan
assert oo + nan == nan
assert -oo + nan == nan
assert oo - nan == nan
assert -oo - nan == nan
assert S.Zero * oo == nan
assert oo.is_Rational is False
assert isinstance(oo, Rational) is False
assert S.One/oo == 0
assert -S.One/oo == 0
assert S.One/-oo == 0
assert -S.One/-oo == 0
assert S.One*oo == oo
assert -S.One*oo == -oo
assert S.One*-oo == -oo
assert -S.One*-oo == oo
assert S.One/nan == nan
assert S.One - -oo == oo
assert S.One + nan == nan
assert S.One - nan == nan
assert nan - S.One == nan
assert nan/S.One == nan
assert -oo - S.One == -oo
def test_Infinity_2():
x = Symbol('x')
assert oo*x != oo
assert oo*(pi - 1) == oo
assert oo*(1 - pi) == -oo
assert (-oo)*x != -oo
assert (-oo)*(pi - 1) == -oo
assert (-oo)*(1 - pi) == oo
assert (-1)**S.NaN is S.NaN
assert oo - Float('inf') is S.NaN
assert oo + Float('-inf') is S.NaN
assert oo*0 is S.NaN
assert oo/Float('inf') is S.NaN
assert oo/Float('-inf') is S.NaN
assert oo**S.NaN is S.NaN
assert -oo + Float('inf') is S.NaN
assert -oo - Float('-inf') is S.NaN
assert -oo*S.NaN is S.NaN
assert -oo*0 is S.NaN
assert -oo/Float('inf') is S.NaN
assert -oo/Float('-inf') is S.NaN
assert -oo/S.NaN is S.NaN
assert abs(-oo) == oo
assert all((-oo)**i is S.NaN for i in (oo, -oo, S.NaN))
assert (-oo)**3 == -oo
assert (-oo)**2 == oo
assert abs(S.ComplexInfinity) == oo
def test_Mul_Infinity_Zero():
assert 0*Float('inf') == nan
assert 0*Float('-inf') == nan
assert 0*Float('inf') == nan
assert 0*Float('-inf') == nan
assert Float('inf')*0 == nan
assert Float('-inf')*0 == nan
assert Float('inf')*0 == nan
assert Float('-inf')*0 == nan
assert Float(0)*Float('inf') == nan
assert Float(0)*Float('-inf') == nan
assert Float(0)*Float('inf') == nan
assert Float(0)*Float('-inf') == nan
assert Float('inf')*Float(0) == nan
assert Float('-inf')*Float(0) == nan
assert Float('inf')*Float(0) == nan
assert Float('-inf')*Float(0) == nan
def test_Div_By_Zero():
assert 1/S(0) == oo
assert 1/Float(0) == Float('inf')
assert 0/S(0) == nan
assert 0/Float(0) == nan
assert S(0)/0 == nan
assert Float(0)/0 == nan
assert -1/S(0) == -oo
assert -1/Float(0) == Float('-inf')
def test_Infinity_inequations():
assert oo > pi
assert not (oo < pi)
assert exp(-3) < oo
assert Float('+inf') > pi
assert not (Float('+inf') < pi)
assert exp(-3) < Float('+inf')
def test_NaN():
assert nan == nan
assert nan != 1
assert 1*nan == nan
assert 1 != nan
assert nan == -nan
assert oo != Symbol("x")**3
assert nan + 1 == nan
assert 2 + nan == nan
assert 3*nan + 2 == nan
assert -nan*3 == nan
assert nan + nan == nan
assert -nan + nan*(-5) == nan
assert 1/nan == nan
assert 1/(-nan) == nan
assert 8/nan == nan
assert not nan > 0
assert not nan < 0
assert not nan >= 0
assert not nan <= 0
assert not 0 < nan
assert not 0 > nan
assert not 0 <= nan
assert not 0 >= nan
assert S.One + nan == nan
assert S.One - nan == nan
assert S.One*nan == nan
assert S.One/nan == nan
assert nan - S.One == nan
assert nan*S.One == nan
assert nan + S.One == nan
assert nan/S.One == nan
assert nan**0 == 1 # as per IEEE 754
assert 1**nan == 1 # as per IEEE 754
def test_special_numbers():
assert isinstance(S.NaN, Number) is True
assert isinstance(S.Infinity, Number) is True
assert isinstance(S.NegativeInfinity, Number) is True
assert S.NaN.is_number is True
assert S.Infinity.is_number is True
assert S.NegativeInfinity.is_number is True
assert isinstance(S.NaN, Rational) is False
assert isinstance(S.Infinity, Rational) is False
assert isinstance(S.NegativeInfinity, Rational) is False
assert S.NaN.is_rational is not True
assert S.Infinity.is_rational is not True
assert S.NegativeInfinity.is_rational is not True
def test_powers():
assert integer_nthroot(1, 2) == (1, True)
assert integer_nthroot(1, 5) == (1, True)
assert integer_nthroot(2, 1) == (2, True)
assert integer_nthroot(2, 2) == (1, False)
assert integer_nthroot(2, 5) == (1, False)
assert integer_nthroot(4, 2) == (2, True)
assert integer_nthroot(123**25, 25) == (123, True)
assert integer_nthroot(123**25 + 1, 25) == (123, False)
assert integer_nthroot(123**25 - 1, 25) == (122, False)
assert integer_nthroot(1, 1) == (1, True)
assert integer_nthroot(0, 1) == (0, True)
assert integer_nthroot(0, 3) == (0, True)
assert integer_nthroot(10000, 1) == (10000, True)
assert integer_nthroot(4, 2) == (2, True)
assert integer_nthroot(16, 2) == (4, True)
assert integer_nthroot(26, 2) == (5, False)
assert integer_nthroot(1234567**7, 7) == (1234567, True)
assert integer_nthroot(1234567**7 + 1, 7) == (1234567, False)
assert integer_nthroot(1234567**7 - 1, 7) == (1234566, False)
b = 25**1000
assert integer_nthroot(b, 1000) == (25, True)
assert integer_nthroot(b + 1, 1000) == (25, False)
assert integer_nthroot(b - 1, 1000) == (24, False)
c = 10**400
c2 = c**2
assert integer_nthroot(c2, 2) == (c, True)
assert integer_nthroot(c2 + 1, 2) == (c, False)
assert integer_nthroot(c2 - 1, 2) == (c - 1, False)
assert integer_nthroot(2, 10**10) == (1, False)
p, r = integer_nthroot(int(factorial(10000)), 100)
assert p % (10**10) == 5322420655
assert not r
# Test that this is fast
assert integer_nthroot(2, 10**10) == (1, False)
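    # Worked example of the convention exercised above (not an original comment):
    # the helper returns (r, exact) with r = floor(x**(1/n)), e.g.
    # integer_nthroot(26, 3) gives (2, False) because 2**3 = 8 <= 26 < 27 = 3**3.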
def test_integer_nthroot_overflow():
assert integer_nthroot(10**(50*50), 50) == (10**50, True)
assert integer_nthroot(10**100000, 10000) == (10**10, True)
def test_powers_Integer():
"""Test Integer._eval_power"""
# check infinity
assert S(1) ** S.Infinity == 1
assert S(-1)** S.Infinity == S.NaN
assert S(2) ** S.Infinity == S.Infinity
assert S(-2)** S.Infinity == S.Infinity + S.Infinity * S.ImaginaryUnit
assert S(0) ** S.Infinity == 0
# check Nan
assert S(1) ** S.NaN == S.One
assert S(-1) ** S.NaN == S.NaN
# check for exact roots
assert S(-1) ** Rational(6, 5) == - (-1)**(S(1)/5)
assert sqrt(S(4)) == 2
assert sqrt(S(-4)) == I * 2
assert S(16) ** Rational(1, 4) == 2
assert S(-16) ** Rational(1, 4) == 2 * (-1)**Rational(1, 4)
assert S(9) ** Rational(3, 2) == 27
assert S(-9) ** Rational(3, 2) == -27*I
assert S(27) ** Rational(2, 3) == 9
assert S(-27) ** Rational(2, 3) == 9 * (S(-1) ** Rational(2, 3))
assert (-2) ** Rational(-2, 1) == Rational(1, 4)
# not exact roots
assert sqrt(-3) == I*sqrt(3)
assert (3) ** (S(3)/2) == 3 * sqrt(3)
assert (-3) ** (S(3)/2) == - 3 * sqrt(-3)
assert (-3) ** (S(5)/2) == 9 * I * sqrt(3)
assert (-3) ** (S(7)/2) == - I * 27 * sqrt(3)
assert (2) ** (S(3)/2) == 2 * sqrt(2)
assert (2) ** (S(-3)/2) == sqrt(2) / 4
assert (81) ** (S(2)/3) == 9 * (S(3) ** (S(2)/3))
assert (-81) ** (S(2)/3) == 9 * (S(-3) ** (S(2)/3))
assert (-3) ** Rational(-7, 3) == \
-(-1)**Rational(2, 3)*3**Rational(2, 3)/27
assert (-3) ** Rational(-2, 3) == \
-(-1)**Rational(1, 3)*3**Rational(1, 3)/3
# join roots
assert sqrt(6) + sqrt(24) == 3*sqrt(6)
assert sqrt(2) * sqrt(3) == sqrt(6)
    # separate symbols & constants
x = Symbol("x")
assert sqrt(49 * x) == 7 * sqrt(x)
assert sqrt((3 - sqrt(pi)) ** 2) == 3 - sqrt(pi)
# check that it is fast for big numbers
assert (2**64 + 1) ** Rational(4, 3)
assert (2**64 + 1) ** Rational(17, 25)
# negative rational power and negative base
assert (-3) ** Rational(-7, 3) == \
-(-1)**Rational(2, 3)*3**Rational(2, 3)/27
assert (-3) ** Rational(-2, 3) == \
-(-1)**Rational(1, 3)*3**Rational(1, 3)/3
assert S(1234).factors() == {617: 1, 2: 1}
assert Rational(2*3, 3*5*7).factors() == {2: 1, 5: -1, 7: -1}
# test that eval_power factors numbers bigger than
# the current limit in factor_trial_division (2**15)
from sympy import nextprime
n = nextprime(2**15)
assert sqrt(n**2) == n
assert sqrt(n**3) == n*sqrt(n)
assert sqrt(4*n) == 2*sqrt(n)
# check that factors of base with powers sharing gcd with power are removed
assert (2**4*3)**Rational(1, 6) == 2**Rational(2, 3)*3**Rational(1, 6)
assert (2**4*3)**Rational(5, 6) == 8*2**Rational(1, 3)*3**Rational(5, 6)
    # check that bases sharing a gcd are extracted
assert 2**Rational(1, 3)*3**Rational(1, 4)*6**Rational(1, 5) == \
2**Rational(8, 15)*3**Rational(9, 20)
assert sqrt(8)*24**Rational(1, 3)*6**Rational(1, 5) == \
4*2**Rational(7, 10)*3**Rational(8, 15)
assert sqrt(8)*(-24)**Rational(1, 3)*(-6)**Rational(1, 5) == \
4*(-3)**Rational(8, 15)*2**Rational(7, 10)
assert 2**Rational(1, 3)*2**Rational(8, 9) == 2*2**Rational(2, 9)
assert 2**Rational(2, 3)*6**Rational(1, 3) == 2*3**Rational(1, 3)
assert 2**Rational(2, 3)*6**Rational(8, 9) == \
2*2**Rational(5, 9)*3**Rational(8, 9)
assert (-2)**Rational(2, S(3))*(-4)**Rational(1, S(3)) == -2*2**Rational(1, 3)
assert 3*Pow(3, 2, evaluate=False) == 3**3
assert 3*Pow(3, -1/S(3), evaluate=False) == 3**(2/S(3))
assert (-2)**(1/S(3))*(-3)**(1/S(4))*(-5)**(5/S(6)) == \
-(-1)**Rational(5, 12)*2**Rational(1, 3)*3**Rational(1, 4) * \
5**Rational(5, 6)
assert Integer(-2)**Symbol('', even=True) == \
Integer(2)**Symbol('', even=True)
assert (-1)**Float(.5) == 1.0*I
def test_powers_Rational():
"""Test Rational._eval_power"""
# check infinity
assert Rational(1, 2) ** S.Infinity == 0
assert Rational(3, 2) ** S.Infinity == S.Infinity
assert Rational(-1, 2) ** S.Infinity == 0
assert Rational(-3, 2) ** S.Infinity == \
S.Infinity + S.Infinity * S.ImaginaryUnit
# check Nan
assert Rational(3, 4) ** S.NaN == S.NaN
assert Rational(-2, 3) ** S.NaN == S.NaN
# exact roots on numerator
assert sqrt(Rational(4, 3)) == 2 * sqrt(3) / 3
assert Rational(4, 3) ** Rational(3, 2) == 8 * sqrt(3) / 9
assert sqrt(Rational(-4, 3)) == I * 2 * sqrt(3) / 3
assert Rational(-4, 3) ** Rational(3, 2) == - I * 8 * sqrt(3) / 9
assert Rational(27, 2) ** Rational(1, 3) == 3 * (2 ** Rational(2, 3)) / 2
assert Rational(5**3, 8**3) ** Rational(4, 3) == Rational(5**4, 8**4)
# exact root on denominator
assert sqrt(Rational(1, 4)) == Rational(1, 2)
assert sqrt(Rational(1, -4)) == I * Rational(1, 2)
assert sqrt(Rational(3, 4)) == sqrt(3) / 2
assert sqrt(Rational(3, -4)) == I * sqrt(3) / 2
assert Rational(5, 27) ** Rational(1, 3) == (5 ** Rational(1, 3)) / 3
# not exact roots
assert sqrt(Rational(1, 2)) == sqrt(2) / 2
assert sqrt(Rational(-4, 7)) == I * sqrt(Rational(4, 7))
assert Rational(-3, 2)**Rational(-7, 3) == \
-4*(-1)**Rational(2, 3)*2**Rational(1, 3)*3**Rational(2, 3)/27
assert Rational(-3, 2)**Rational(-2, 3) == \
-(-1)**Rational(1, 3)*2**Rational(2, 3)*3**Rational(1, 3)/3
# negative integer power and negative rational base
assert Rational(-2, 3) ** Rational(-2, 1) == Rational(9, 4)
a = Rational(1, 10)
assert a**Float(a, 2) == Float(a, 2)**Float(a, 2)
assert Rational(-2, 3)**Symbol('', even=True) == \
Rational(2, 3)**Symbol('', even=True)
def test_powers_Float():
assert str((S('-1/10')**S('3/10')).n()) == str(Float(-.1)**(.3))
def test_abs1():
assert Rational(1, 6) != Rational(-1, 6)
assert abs(Rational(1, 6)) == abs(Rational(-1, 6))
def test_accept_int():
assert Float(4) == 4
def test_dont_accept_str():
assert Float("0.2") != "0.2"
assert not (Float("0.2") == "0.2")
def test_int():
a = Rational(5)
assert int(a) == 5
a = Rational(9, 10)
assert int(a) == int(-a) == 0
assert 1/(-1)**Rational(2, 3) == -(-1)**Rational(1, 3)
assert int(pi) == 3
assert int(E) == 2
assert int(GoldenRatio) == 1
def test_long():
a = Rational(5)
assert long(a) == 5
a = Rational(9, 10)
assert long(a) == long(-a) == 0
a = Integer(2**100)
assert long(a) == a
assert long(pi) == 3
assert long(E) == 2
assert long(GoldenRatio) == 1
def test_real_bug():
x = Symbol("x")
assert str(2.0*x*x) in ["(2.0*x)*x", "2.0*x**2", "2.00000000000000*x**2"]
assert str(2.1*x*x) != "(2.0*x)*x"
def test_bug_sqrt():
assert ((sqrt(Rational(2)) + 1)*(sqrt(Rational(2)) - 1)).expand() == 1
def test_pi_Pi():
"Test, that pi (instance) is imported, but Pi (class) is not"
from sympy import pi
with raises(ImportError):
from sympy import Pi
def test_no_len():
# there should be no len for numbers
raises(TypeError, lambda: len(Rational(2)))
raises(TypeError, lambda: len(Rational(2, 3)))
raises(TypeError, lambda: len(Integer(2)))
def test_issue222():
assert sqrt(Rational(1, 5)) == sqrt(Rational(1, 5))
assert 5 * sqrt(Rational(1, 5)) == sqrt(5)
def test_issue593():
assert ((-1)**Rational(1, 6)).expand(complex=True) == I/2 + sqrt(3)/2
assert ((-5)**Rational(1, 6)).expand(complex=True) == \
5**Rational(1, 6)*I/2 + 5**Rational(1, 6)*sqrt(3)/2
assert ((-64)**Rational(1, 6)).expand(complex=True) == I + sqrt(3)
def test_issue324():
x = Symbol("x")
assert sqrt(x - 1).as_base_exp() == (x - 1, S.Half)
assert sqrt(x - 1) != I*sqrt(1 - x)
def test_issue350():
x = Symbol("x", real=True)
assert sqrt(x**2) == abs(x)
assert sqrt(x - 1).subs(x, 5) == 2
def test_Integer_factors():
def F(i):
return Integer(i).factors()
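    # factors() returns a dict mapping each prime factor to its multiplicity;
    # 1 is special-cased to {1: 1}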
assert F(1) == {1: 1}
assert F(2) == {2: 1}
assert F(3) == {3: 1}
assert F(4) == {2: 2}
assert F(5) == {5: 1}
assert F(6) == {2: 1, 3: 1}
assert F(7) == {7: 1}
assert F(8) == {2: 3}
assert F(9) == {3: 2}
assert F(10) == {2: 1, 5: 1}
assert F(11) == {11: 1}
assert F(12) == {2: 2, 3: 1}
assert F(13) == {13: 1}
assert F(14) == {2: 1, 7: 1}
assert F(15) == {3: 1, 5: 1}
assert F(16) == {2: 4}
assert F(17) == {17: 1}
assert F(18) == {2: 1, 3: 2}
assert F(19) == {19: 1}
assert F(20) == {2: 2, 5: 1}
assert F(21) == {3: 1, 7: 1}
assert F(22) == {2: 1, 11: 1}
assert F(23) == {23: 1}
assert F(24) == {2: 3, 3: 1}
assert F(25) == {5: 2}
assert F(26) == {2: 1, 13: 1}
assert F(27) == {3: 3}
assert F(28) == {2: 2, 7: 1}
assert F(29) == {29: 1}
assert F(30) == {2: 1, 3: 1, 5: 1}
assert F(31) == {31: 1}
assert F(32) == {2: 5}
assert F(33) == {3: 1, 11: 1}
assert F(34) == {2: 1, 17: 1}
assert F(35) == {5: 1, 7: 1}
assert F(36) == {2: 2, 3: 2}
assert F(37) == {37: 1}
assert F(38) == {2: 1, 19: 1}
assert F(39) == {3: 1, 13: 1}
assert F(40) == {2: 3, 5: 1}
assert F(41) == {41: 1}
assert F(42) == {2: 1, 3: 1, 7: 1}
assert F(43) == {43: 1}
assert F(44) == {2: 2, 11: 1}
assert F(45) == {3: 2, 5: 1}
assert F(46) == {2: 1, 23: 1}
assert F(47) == {47: 1}
assert F(48) == {2: 4, 3: 1}
assert F(49) == {7: 2}
assert F(50) == {2: 1, 5: 2}
assert F(51) == {3: 1, 17: 1}
def test_Rational_factors():
def F(p, q, visual=None):
return Rational(p, q).factors(visual=visual)
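    # for a Rational, primes from the denominator carry negative exponents;
    # visual=True returns an unevaluated product instead of a dict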
assert F(2, 3) == {2: 1, 3: -1}
assert F(2, 9) == {2: 1, 3: -2}
assert F(2, 15) == {2: 1, 3: -1, 5: -1}
assert F(6, 10) == {3: 1, 5: -1}
assert str(F(12, 1, visual=True)) == '2**2*3**1'
assert str(F(1, 1, visual=True)) == '1'
assert str(F(25, 14, visual=True)) == '5**2/(2*7)'
assert str(F(-25, 14*9, visual=True)) == '-5**2/(2*3**2*7)'
def test_issue1008():
assert pi*(E + 10) + pi*(-E - 10) != 0
assert pi*(E + 10**10) + pi*(-E - 10**10) != 0
assert pi*(E + 10**20) + pi*(-E - 10**20) != 0
assert pi*(E + 10**80) + pi*(-E - 10**80) != 0
assert (pi*(E + 10) + pi*(-E - 10)).expand() == 0
assert (pi*(E + 10**10) + pi*(-E - 10**10)).expand() == 0
assert (pi*(E + 10**20) + pi*(-E - 10**20)).expand() == 0
assert (pi*(E + 10**80) + pi*(-E - 10**80)).expand() == 0
def test_IntegerInteger():
a = Integer(4)
b = Integer(a)
assert a == b
def test_Rational_gcd_lcm_cofactors():
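    # cofactors(b) is expected to return (gcd, self/gcd, b/gcd); with a Float
    # argument the gcd falls back to 1 and the lcm to the plain product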
assert Integer(4).gcd(2) == Integer(2)
assert Integer(4).lcm(2) == Integer(4)
assert Integer(4).gcd(Integer(2)) == Integer(2)
assert Integer(4).lcm(Integer(2)) == Integer(4)
assert Integer(4).gcd(3) == Integer(1)
assert Integer(4).lcm(3) == Integer(12)
assert Integer(4).gcd(Integer(3)) == Integer(1)
assert Integer(4).lcm(Integer(3)) == Integer(12)
assert Rational(4, 3).gcd(2) == Rational(2, 3)
assert Rational(4, 3).lcm(2) == Integer(4)
assert Rational(4, 3).gcd(Integer(2)) == Rational(2, 3)
assert Rational(4, 3).lcm(Integer(2)) == Integer(4)
assert Integer(4).gcd(Rational(2, 9)) == Rational(2, 9)
assert Integer(4).lcm(Rational(2, 9)) == Integer(4)
assert Rational(4, 3).gcd(Rational(2, 9)) == Rational(2, 9)
assert Rational(4, 3).lcm(Rational(2, 9)) == Rational(4, 3)
assert Rational(4, 5).gcd(Rational(2, 9)) == Rational(2, 45)
assert Rational(4, 5).lcm(Rational(2, 9)) == Integer(4)
assert Integer(4).cofactors(2) == (Integer(2), Integer(2), Integer(1))
assert Integer(4).cofactors(Integer(2)) == \
(Integer(2), Integer(2), Integer(1))
assert Integer(4).gcd(Float(2.0)) == S.One
assert Integer(4).lcm(Float(2.0)) == Float(8.0)
assert Integer(4).cofactors(Float(2.0)) == (S.One, Integer(4), Float(2.0))
assert Rational(1, 2).gcd(Float(2.0)) == S.One
assert Rational(1, 2).lcm(Float(2.0)) == Float(1.0)
assert Rational(1, 2).cofactors(Float(2.0)) == \
(S.One, Rational(1, 2), Float(2.0))
def test_Float_gcd_lcm_cofactors():
assert Float(2.0).gcd(Integer(4)) == S.One
assert Float(2.0).lcm(Integer(4)) == Float(8.0)
assert Float(2.0).cofactors(Integer(4)) == (S.One, Float(2.0), Integer(4))
assert Float(2.0).gcd(Rational(1, 2)) == S.One
assert Float(2.0).lcm(Rational(1, 2)) == Float(1.0)
assert Float(2.0).cofactors(Rational(1, 2)) == \
(S.One, Float(2.0), Rational(1, 2))
def test_issue1512():
assert abs(pi._evalf(50) - 3.14159265358979) < 1e-10
assert abs(E._evalf(50) - 2.71828182845905) < 1e-10
assert abs(Catalan._evalf(50) - 0.915965594177219) < 1e-10
assert abs(EulerGamma._evalf(50) - 0.577215664901533) < 1e-10
assert abs(GoldenRatio._evalf(50) - 1.61803398874989) < 1e-10
x = Symbol("x")
assert (pi + x).evalf() == pi.evalf() + x
assert (E + x).evalf() == E.evalf() + x
assert (Catalan + x).evalf() == Catalan.evalf() + x
assert (EulerGamma + x).evalf() == EulerGamma.evalf() + x
assert (GoldenRatio + x).evalf() == GoldenRatio.evalf() + x
def test_conversion_to_mpmath():
assert mpmath.mpmathify(Integer(1)) == mpmath.mpf(1)
assert mpmath.mpmathify(Rational(1, 2)) == mpmath.mpf(0.5)
assert mpmath.mpmathify(Float('1.23', 15)) == mpmath.mpf('1.23')
def test_relational():
# real
x = S(.1)
assert (x != cos) is True
assert (x == cos) is False
# rational
x = Rational(1, 3)
assert (x != cos) is True
assert (x == cos) is False
# integer defers to rational so these tests are omitted
# number symbol
x = pi
assert (x != cos) is True
assert (x == cos) is False
def test_Integer_as_index():
if hasattr(int, '__index__'): # Python 2.5+ (PEP 357)
assert 'hello'[Integer(2):] == 'llo'
def test_Rational_int():
assert int( Rational(7, 5)) == 1
assert int( Rational(1, 2)) == 0
assert int(-Rational(1, 2)) == 0
assert int(-Rational(7, 5)) == -1
def test_zoo():
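    # zoo is ComplexInfinity; the loop below checks how it combines with
    # bounded/unbounded, zero/nonzero, real/imaginary quantities under
    # addition, multiplication and division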
b = Symbol('b', bounded=True)
nz = Symbol('nz', nonzero=True)
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
im = Symbol('i', imaginary=True)
c = Symbol('c', complex=True)
pb = Symbol('pb', positive=True, bounded=True)
nb = Symbol('nb', negative=True, bounded=True)
imb = Symbol('ib', imaginary=True, bounded=True)
for i in [I, S.Infinity, S.NegativeInfinity, S.Zero, S.One, S.Pi, S.Half, S(3), log(3),
b, nz, p, n, im, pb, nb, imb, c]:
if i.is_bounded and (i.is_real or i.is_imaginary):
assert i + zoo is zoo
assert i - zoo is zoo
assert zoo + i is zoo
assert zoo - i is zoo
elif i.is_bounded is not False:
assert (i + zoo).is_Add
assert (i - zoo).is_Add
assert (zoo + i).is_Add
assert (zoo - i).is_Add
else:
assert (i + zoo) is S.NaN
assert (i - zoo) is S.NaN
assert (zoo + i) is S.NaN
assert (zoo - i) is S.NaN
if i.is_nonzero and (i.is_real or i.is_imaginary):
assert i*zoo is zoo
assert zoo*i is zoo
elif i.is_zero:
assert i*zoo is S.NaN
assert zoo*i is S.NaN
else:
assert (i*zoo).is_Mul
assert (zoo*i).is_Mul
if (1/i).is_nonzero and (i.is_real or i.is_imaginary):
assert zoo/i is zoo
elif (1/i).is_zero:
assert zoo/i is S.NaN
else:
assert (zoo/i).is_Mul
assert (I*oo).is_Mul # allow directed infinity
assert zoo + zoo is S.NaN
assert zoo * zoo is zoo
assert zoo - zoo is S.NaN
assert zoo/zoo is S.NaN
assert zoo**zoo is S.NaN
assert zoo**0 is S.One
assert zoo**2 is zoo
assert 1/zoo is S.Zero
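    # 0*oo is indeterminate, so flattening the product below should collapse to NaN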
assert Mul.flatten([S(-1), oo, S(0)]) == ([S.NaN], [], None)
def test_issue_1023():
x = Symbol('x', nonpositive=True)
assert (oo + x).is_Add
x = Symbol('x', bounded=True)
assert (oo + x).is_Add # x could be imaginary
x = Symbol('x', finite=True)
assert (oo + x).is_Add # x could be imaginary
x = Symbol('x', infinitesimal=True)
assert (oo + x).is_Add # x could be imaginary
x = Symbol('x', nonnegative=True)
assert oo + x == oo
x = Symbol('x', bounded=True, real=True)
assert oo + x == oo
x = Symbol('x', finite=True, real=True)
assert oo + x == oo
x = Symbol('x', infinitesimal=True, real=True)
assert oo + x == oo
    # similarly for negative infinity
x = Symbol('x', nonnegative=True)
assert (-oo + x).is_Add
x = Symbol('x', bounded=True)
assert (-oo + x).is_Add
x = Symbol('x', finite=True)
assert (-oo + x).is_Add
x = Symbol('x', infinitesimal=True)
assert (-oo + x).is_Add
x = Symbol('x', nonpositive=True)
assert -oo + x == -oo
x = Symbol('x', bounded=True, real=True)
assert -oo + x == -oo
x = Symbol('x', finite=True, real=True)
assert -oo + x == -oo
x = Symbol('x', infinitesimal=True, real=True)
assert -oo + x == -oo
def test_GoldenRatio_expand():
assert GoldenRatio.expand(func=True) == S.Half + sqrt(5)/2
def test_as_content_primitive():
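    # as_content_primitive() splits a number into a positive Rational content
    # and a primitive part whose product reproduces the original value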
assert S.Zero.as_content_primitive() == (1, 0)
assert S.Half.as_content_primitive() == (S.Half, 1)
assert (-S.Half).as_content_primitive() == (S.Half, -1)
assert S(3).as_content_primitive() == (3, 1)
assert S(3.1).as_content_primitive() == (1, 3.1)
@XFAIL
def test_hashing_sympy_integers():
# Test for issue #1973
# http://code.google.com/p/sympy/issues/detail?id=1973
assert hash(S(4)) == 4
assert hash(S(4)) == hash(int(4))
def test_issue_1073():
assert int((E**100).round()) == \
26881171418161354484126255515800135873611119
assert int((pi**100).round()) == \
51878483143196131920862615246303013562686760680406
assert int((Rational(1)/EulerGamma**100).round()) == \
734833795660954410469466
@XFAIL
def test_mpmath_issues():
from sympy.mpmath.libmp.libmpf import _normalize
import sympy.mpmath.libmp as mlib
rnd = mlib.round_nearest
mpf = (0, 0L, -123, -1, 53, rnd) # nan
assert _normalize(mpf, 53) != (0, 0L, 0, 0)
mpf = (0, 0L, -456, -2, 53, rnd) # +inf
assert _normalize(mpf, 53) != (0, 0L, 0, 0)
mpf = (1, 0L, -789, -3, 53, rnd) # -inf
assert _normalize(mpf, 53) != (0, 0L, 0, 0)
from sympy.mpmath.libmp.libmpf import fnan
assert mlib.mpf_eq(fnan, fnan)
def test_Catalan_EulerGamma_prec():
n = GoldenRatio
f = Float(n.n(), 5)
assert f._mpf_ == (0, 212079L, -17, 18)
assert f._prec == 20
assert n._as_mpf_val(20) == f._mpf_
n = EulerGamma
f = Float(n.n(), 5)
assert f._mpf_ == (0, 302627L, -19, 19)
assert f._prec == 20
assert n._as_mpf_val(20) == f._mpf_
def test_Float_eq():
assert Float(.12, 3) != Float(.12, 4)
assert Float(.12, 3) == .12
assert 0.12 == Float(.12, 3)
assert Float('.12', 22) != .12
def test_int_NumberSymbols():
assert [int(i) for i in [pi, EulerGamma, E, GoldenRatio, Catalan]] == \
[3, 0, 2, 1, 0]
def test_3541():
from sympy.mpmath.libmp.libmpf import (
_normalize as mpf_normalize, finf, fninf, fzero)
# fnan is not included because Float no longer returns fnan,
# but otherwise, the same sort of test could apply
assert Float(finf).is_zero is False
assert Float(fninf).is_zero is False
assert bool(Float(0)) is False
def test_3250():
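    # an empty-string precision asks Float to infer the precision from the
    # digits in the literal; the asserts pin down the bits each spelling gets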
assert Float('23.e3', '')._prec == 10
assert Float('23e3', '')._prec == 20
assert Float('23000', '')._prec == 20
assert Float('-23000', '')._prec == 20
def test_mpf_norm():
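    # mpf tuples are (sign, mantissa, exponent, bit count); a zero mantissa
    # should normalize to the canonical zero regardless of the other fields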
assert mpf_norm((1, 0, 1, 0), 10) == mpf('0')._mpf_
assert Float._new((1, 0, 1, 0), 10)._mpf_ == mpf('0')._mpf_
| gpl-3.0 | -749,902,200,067,874,200 | 31.528602 | 91 | 0.548729 | false |