code | repo_name | path | language | license | size
---|---|---|---|---|---
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.keywordsearch import get_query, get_project_models
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from rest_framework.generics import ListAPIView
from csacompendium.search.api.globalsearch.globalsearchserializers import global_search_serializers
# from rest_framework.response import Response
def global_search_views():
"""
Global search views
:return: All global search views
:rtype: Object
"""
global_search_serializer = global_search_serializers()
class GlobalSearchListAPIView(ListAPIView):
"""
API list view. Gets all records API.
"""
serializer_class = global_search_serializer['GlobalSearchListSerializer']
pagination_class = APILimitOffsetPagination
def get_queryset(self):
"""
Return the list of items for this view.
:return global_results: List of items from global search
:rtype: List of model instances
"""
query_param = self.request.query_params.get('query', None)
global_results = []
model_count = 0
if query_param:
for model in get_project_models('csacompendium'):
entry_query = get_query(query_param, model)
if entry_query:
query_result = model.objects.filter(entry_query).distinct()
if query_result.exists():
global_results += query_result
if model_count == 1:
global_results = []
break
model_count += 1
if global_results:
global_results.sort(key=lambda x: x.time_created)
return global_results
return global_results
def get_serializer_class(self):
"""
Return the class to use for the serializer.
:return: Serializer class
:rtype: Object
"""
obj = self.get_queryset()
if obj:
for model in obj:
self.serializer_class.Meta.model = model.__class__
return self.serializer_class
return self.serializer_class
# def get_serializer(self, *args, **kwargs):
# serializer_class = self.get_serializer_class()
# kwargs['context'] = self.request
# serialize_data = False
# for model in args[0]:
# serialize_data = True if serializer_class.Meta.model == model.__class__ else False
# if serialize_data:
# return serializer_class(*args, **kwargs)
# return Response(None)
return {
'GlobalSearchListAPIView': GlobalSearchListAPIView
}
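# Illustrative wiring (an assumption, not part of the original module): the view
# returned above is typically hooked into a urls.py along these lines:
#
#   from django.conf.urls import url
#   from .globalsearchviews import global_search_views
#
#   global_search_view = global_search_views()
#   urlpatterns = [
#       url(r'^$', global_search_view['GlobalSearchListAPIView'].as_view()),
#   ]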
| nkoech/csacompendium | csacompendium/search/api/globalsearch/globalsearchviews.py | Python | mit | 2,957 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a method for fetching Service Configuration from Google Service
Management API."""
import logging
import json
import os
import urllib3
from apitools.base.py import encoding
import google.api.gen.servicecontrol_v1_messages as messages
from oauth2client import client
from urllib3.contrib import appengine
logger = logging.getLogger(__name__)
_GOOGLE_API_SCOPE = "https://www.googleapis.com/auth/cloud-platform"
_SERVICE_MGMT_URL_TEMPLATE = ("https://servicemanagement.googleapis.com"
"/v1/services/{}/configs/{}")
_SERVICE_NAME_ENV_KEY = "ENDPOINTS_SERVICE_NAME"
_SERVICE_VERSION_ENV_KEY = "ENDPOINTS_SERVICE_VERSION"
def fetch_service_config(service_name=None, service_version=None):
"""Fetches the service config from Google Serivce Management API.
Args:
service_name: the service name. When this argument is unspecified, this
method uses the value of the "SERVICE_NAME" environment variable as the
service name, and raises ValueError if the environment variable is unset.
service_version: the service version. When this argument is unspecified,
this method uses the value of the "SERVICE_VERSION" environment variable
as the service version, and raises ValueError if the environment variable
is unset.
Returns: the fetched service config JSON object.
Raises:
ValueError: when the service name/version is neither provided as an
argument or set as an environment variable; or when the fetched service
config fails validation.
Exception: when the Google Service Management API returns non-200 response.
"""
if not service_name:
service_name = _get_env_var_or_raise(_SERVICE_NAME_ENV_KEY)
if not service_version:
service_version = _get_env_var_or_raise(_SERVICE_VERSION_ENV_KEY)
service_mgmt_url = _SERVICE_MGMT_URL_TEMPLATE.format(service_name,
service_version)
access_token = _get_access_token()
headers = {"Authorization": "Bearer {}".format(access_token)}
http_client = _get_http_client()
response = http_client.request("GET", service_mgmt_url, headers=headers)
status_code = response.status
if status_code != 200:
message_template = "Fetching service config failed (status code {})"
_log_and_raise(Exception, message_template.format(status_code))
logger.debug('obtained service json from the management api:\n%s', response.data)
service = encoding.JsonToMessage(messages.Service, response.data)
_validate_service_config(service, service_name, service_version)
return service
def _get_access_token():
credentials = client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(_GOOGLE_API_SCOPE)
return credentials.get_access_token().access_token
def _get_http_client():
if appengine.is_appengine_sandbox():
return appengine.AppEngineManager()
else:
return urllib3.PoolManager()
def _get_env_var_or_raise(env_variable_name):
if env_variable_name not in os.environ:
message_template = 'The "{}" environment variable is not set'
_log_and_raise(ValueError, message_template.format(env_variable_name))
return os.environ[env_variable_name]
def _validate_service_config(service, expected_service_name,
expected_service_version):
service_name = service.name
if not service_name:
_log_and_raise(ValueError, "No service name in the service config")
if service_name != expected_service_name:
message_template = "Unexpected service name in service config: {}"
_log_and_raise(ValueError, message_template.format(service_name))
service_version = service.id
if not service_version:
_log_and_raise(ValueError, "No service version in the service config")
if service_version != expected_service_version:
message_template = "Unexpected service version in service config: {}"
_log_and_raise(ValueError, message_template.format(service_version))
def _log_and_raise(exception_class, message):
logger.error(message)
raise exception_class(message)
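# Illustrative usage (an assumption, not part of the original module): with the
# two environment variables read by _get_env_var_or_raise() set, the config can
# be fetched without arguments:
#
#   import os
#   os.environ["ENDPOINTS_SERVICE_NAME"] = "example-service"     # hypothetical value
#   os.environ["ENDPOINTS_SERVICE_VERSION"] = "2016-08-01r0"     # hypothetical value
#   service = fetch_service_config()  # returns a messages.Service instance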
| catapult-project/catapult | third_party/google-endpoints/google/api/config/service_config.py | Python | bsd-3-clause | 4,729 |
# Static class
class Lang:
"""语言包配置
>>Demo:
from lang import Lang
print Lang.getLang("ErrorCode")
"""
@staticmethod
def getLang(name):
"""获取语言包配置属性,参数name:属性名"""
return Lang.__langconfig[name]
__langconfig = {
"ErrorCode": 10000,
"ErrorInfo": "系统繁忙!",
"LoadError": "加载数据失败!",
"St1404_OverfulfilNumNotEnough":"该佣兵灵魂数量不足,无法招募!",
"St1114_IsNotGiftBag":"此物品不是礼包!",
"St1425_MercenarySoulNotEnough":"佣兵灵魂为空!",
"St1425_MercenaryOverfulfil":"是否对该佣兵进行突破潜能?",
"St1425_MercenaryNoRecruitment":"该佣兵未招募!",
"St1425_OverfulfilNumNotEnough":"该佣兵灵魂数量不足,无法突破!",
"St1484_GeneralAndAbilityIsExist":"佣兵已经存在此魂技!",
"St1484_OperateDefaultAbilityError":"不允许对默认魂技进行操作!",
"St1442_GeneralLvNotUserLv":"佣兵等级不能高于会长等级3倍!",
"St1442_GeneralLvIsMax":"佣兵等级已达到上限!",
"St5108_CombatReplayFail":"战斗回放失败!",
"St9302_IsNotFriend":"该用户不是您的好友,请重新选择好友",
"St9302_OverMaxLength":"信件内容过长",
"St9302_NoMail":"尊敬的会长,暂时没有收到伊妹儿!",
"St9302_Minutes":"分钟前",
"St9302_Hours":"小时前",
"St9302_Days":"天前",
"St7012_IntegralNotEnough":"积分不足",
"St1449_GeneralHaveEqu":"请先卸下传承佣兵身上的装备",
"St1449_GeneralHaveCrystal":"请先卸下传承佣兵身上的水晶",
"St1449_GeneralHaveAbility":"请先卸下传承佣兵身上的魂技",
"St1425_OverfulfilNumNotEnough":"该佣兵灵魂数量不足,无法突破",
"St3013_NoWizard":"精灵已使用完!",
"St13002_gameUserLv":"玩家等级不够,不能挑战!",
"St13002_BattleRount":"玩家挑战次数已满,今日不能再挑战!",
"St4405_UserLvNotEnough":"玩家未达到10级,不能进行挑战!",
"St4405_ChallengeChanceNotEnough":"今日已无挑战机会!",
"St4407_FightFail":"战斗失败,无法获得加强属性!",
"St4407_NoAddProperty":"该层无加强属性!",
"St4408_ProNotAvailable":"该属性加成暂不可用!",
"St12057_UserLvNotEnough":"玩家未达到20级,暂未开启‘遗迹考古’活动!",
"St12053_HasEnoughMapCount":"该怪物的所有碎片已集齐,无需继续挑战!",
"St12053_EnergyNotEnough":"精力不足,请等待精力恢复后继续战斗!",
"St12053_BuyOneChallenge":"挑战次数达到上限,是否花费10晶石增加一次挑战机会?",
"St12101_NotLairTreasure":"龙穴获取奖励表失败",
"St12102_GameCoinNotEnough":"金币不足",
"St12102_PayGoldNotEnough":"晶石不足,是否立即充值?",
"St12102_LairNumNot":"剩余次数不足!",
"Gold":"晶石",
"GameGoin":"金币",
"Today":"今天",
"Tomorrow":"明天",
"DateFormatMMdd":"MM月dd日",
"GetAccessFailure":"获取受权失败!"
}
| wenhulove333/ScutServer | Sample/Koudai/Server/src/ZyGames.Tianjiexing.Server/Script/PyScript/Lib/lang.py | Python | mit | 3,385 |
"""
curtains.py
Problem: Calculate how much material to buy, given the size of the windows.
Target Users: My friend who wants to make some curtains
Target System: GNU/Linux
Interface: Command-line
Functional Requirements: Print out the required length of fabric in meters
Print out the total price of the fabric
User must be able to input the measurements of the window
Testing: Simple run test
Maintainer: maintainer@website.com
"""
__version__ = 0.1
# To start with, all the measurements will be in cm
# Assume that the roll of material is going to be 140cm wide
# and that the price per meter will be 5 units of currency
roll_width = 140
price_per_metre = 5
# Prompt the user to input the window measurements in cm
window_height = input('Enter the height of the window (cm): ')
window_width = input('Enter the width of the window (cm): ')
# Add a bit for the hems
# First we must convert the string into a number
# otherwise we will get an error if we try to perform arithmetic on a text string
curtain_width = float(window_width) * 0.75 + 20
curtain_length = float(window_height) + 15
# Work out how many widths of cloth will be needed
# and figure out the total length of material for each curtain (in cm still)
widths = curtain_width / roll_width
total_length = curtain_length * widths
# Actually there are two curtains, so we must double the amount of material
# and then divide by 10 to get the number of meters
total_length = (total_length * 2) / 10
# Finally, work out how much it will cost
price = total_length * price_per_metre
# And print out the result
print("You need", total_length, "meters of cloth for ", price)
| r-castro/Python | curtain.py | Python | gpl-3.0 | 1,637 |
"""
===============
tao package
===============
.. toctree::
:maxdepth: 2
admin
assets
context_processors
datasets
decorators
demo
development
forms
mail
models
pagination
qa
settings
test
time
ui_modules
urls
widgets
workflow
xml_util
"""
| IntersectAustralia/asvo-tao | web/tao/__init__.py | Python | gpl-3.0 | 306 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import Form
from odoo.addons.stock.tests.test_report import TestReportsCommon
class TestMrpStockReports(TestReportsCommon):
def test_report_forecast_1_mo_count(self):
""" Creates and configures a product who could be produce and could be a component.
Plans some producing and consumming MO and check the report values.
"""
# Creates the test products.
product_chocolate = self.env['product.product'].create({
'name': 'Chocolate',
'type': 'consu',
})
product_chococake = self.env['product.product'].create({
'name': 'Choco Cake',
'type': 'product',
})
product_double_chococake = self.env['product.product'].create({
'name': 'Double Choco Cake',
'type': 'product',
})
# Creates two BOMs: one producing a choco cake, one using choco cakes as a component.
bom_chococake = self.env['mrp.bom'].create({
'product_id': product_chococake.id,
'product_tmpl_id': product_chococake.product_tmpl_id.id,
'product_uom_id': product_chococake.uom_id.id,
'product_qty': 1.0,
'type': 'normal',
'bom_line_ids': [
(0, 0, {'product_id': product_chocolate.id, 'product_qty': 4}),
],
})
bom_double_chococake = self.env['mrp.bom'].create({
'product_id': product_double_chococake.id,
'product_tmpl_id': product_double_chococake.product_tmpl_id.id,
'product_uom_id': product_double_chococake.uom_id.id,
'product_qty': 1.0,
'type': 'normal',
'bom_line_ids': [
(0, 0, {'product_id': product_chococake.id, 'product_qty': 2}),
],
})
# Creates two MO: one for each BOM.
mo_form = Form(self.env['mrp.production'])
mo_form.product_id = product_chococake
mo_form.bom_id = bom_chococake
mo_form.product_qty = 10
mo_1 = mo_form.save()
mo_form = Form(self.env['mrp.production'])
mo_form.product_id = product_double_chococake
mo_form.bom_id = bom_double_chococake
mo_form.product_qty = 2
mo_2 = mo_form.save()
report_values, docs, lines = self.get_report_forecast(product_template_ids=product_chococake.product_tmpl_id.ids)
draft_picking_qty = docs['draft_picking_qty']
draft_production_qty = docs['draft_production_qty']
self.assertEqual(len(lines), 0, "Must have 0 lines.")
self.assertEqual(draft_picking_qty['in'], 0)
self.assertEqual(draft_picking_qty['out'], 0)
self.assertEqual(draft_production_qty['in'], 10)
self.assertEqual(draft_production_qty['out'], 4)
# Confirms the MO and checks the report lines.
mo_1.action_confirm()
mo_2.action_confirm()
report_values, docs, lines = self.get_report_forecast(product_template_ids=product_chococake.product_tmpl_id.ids)
draft_picking_qty = docs['draft_picking_qty']
draft_production_qty = docs['draft_production_qty']
self.assertEqual(len(lines), 2, "Must have two lines.")
line_1 = lines[0]
line_2 = lines[1]
self.assertEqual(line_1['document_in'].id, mo_1.id)
self.assertEqual(line_1['quantity'], 4)
self.assertEqual(line_1['document_out'].id, mo_2.id)
self.assertEqual(line_2['document_in'].id, mo_1.id)
self.assertEqual(line_2['quantity'], 6)
self.assertEqual(line_2['document_out'], False)
self.assertEqual(draft_picking_qty['in'], 0)
self.assertEqual(draft_picking_qty['out'], 0)
self.assertEqual(draft_production_qty['in'], 0)
self.assertEqual(draft_production_qty['out'], 0)
def test_report_forecast_2_production_backorder(self):
""" Creates a manufacturing order and produces half the quantity.
Then creates a backorder and checks the report.
"""
# Configures the warehouse.
warehouse = self.env.ref('stock.warehouse0')
warehouse.manufacture_steps = 'pbm_sam'
# Configures a product.
product_apple_pie = self.env['product.product'].create({
'name': 'Apple Pie',
'type': 'product',
})
product_apple = self.env['product.product'].create({
'name': 'Apple',
'type': 'consu',
})
bom = self.env['mrp.bom'].create({
'product_id': product_apple_pie.id,
'product_tmpl_id': product_apple_pie.product_tmpl_id.id,
'product_uom_id': product_apple_pie.uom_id.id,
'product_qty': 1.0,
'type': 'normal',
'bom_line_ids': [
(0, 0, {'product_id': product_apple.id, 'product_qty': 5}),
],
})
# Creates a MO and validates the pick components.
mo_form = Form(self.env['mrp.production'])
mo_form.product_id = product_apple_pie
mo_form.bom_id = bom
mo_form.product_qty = 4
mo_1 = mo_form.save()
mo_1.action_confirm()
pick = mo_1.move_raw_ids.move_orig_ids.picking_id
pick_form = Form(pick)
with pick_form.move_line_ids_without_package.edit(0) as move_line:
move_line.qty_done = 20
pick = pick_form.save()
pick.button_validate()
# Produces 3 products then creates a backorder for the remaining product.
mo_form = Form(mo_1)
mo_form.qty_producing = 3
mo_1 = mo_form.save()
action = mo_1.button_mark_done()
backorder_form = Form(self.env['mrp.production.backorder'].with_context(**action['context']))
backorder = backorder_form.save()
backorder.action_backorder()
mo_2 = (mo_1.procurement_group_id.mrp_production_ids - mo_1)
# Checks the forecast report.
report_values, docs, lines = self.get_report_forecast(product_template_ids=product_apple_pie.product_tmpl_id.ids)
self.assertEqual(len(lines), 1, "Must have only one line about the backorder")
self.assertEqual(lines[0]['document_in'].id, mo_2.id)
self.assertEqual(lines[0]['quantity'], 1)
self.assertEqual(lines[0]['document_out'], False)
# Produces the last unit.
mo_form = Form(mo_2)
mo_form.qty_producing = 1
mo_2 = mo_form.save()
mo_2.button_mark_done()
# Checks the forecast report.
report_values, docs, lines = self.get_report_forecast(product_template_ids=product_apple_pie.product_tmpl_id.ids)
self.assertEqual(len(lines), 0, "Must have no line")
def test_report_forecast_3_report_line_corresponding_to_mo_highlighted(self):
""" When accessing the report from a MO, checks if the correct MO is highlighted in the report
"""
product_banana = self.env['product.product'].create({
'name': 'Banana',
'type': 'product',
})
product_chocolate = self.env['product.product'].create({
'name': 'Chocolate',
'type': 'consu',
})
# We create 2 identical MO
mo_form = Form(self.env['mrp.production'])
mo_form.product_id = product_banana
mo_form.product_qty = 10
with mo_form.move_raw_ids.new() as move:
move.product_id = product_chocolate
mo_1 = mo_form.save()
mo_2 = mo_1.copy()
(mo_1 | mo_2).action_confirm()
# Check for both MO if the highlight (is_matched) corresponds to the correct MO
for mo in [mo_1, mo_2]:
context = mo.action_product_forecast_report()['context']
_, _, lines = self.get_report_forecast(product_template_ids=product_banana.product_tmpl_id.ids, context=context)
for line in lines:
if line['document_in'] == mo:
self.assertTrue(line['is_matched'], "The corresponding MO line should be matched in the forecast report.")
else:
self.assertFalse(line['is_matched'], "A line of the forecast report not linked to the MO should not be matched.")
| jeremiahyan/odoo | addons/mrp/tests/test_stock_report.py | Python | gpl-3.0 | 8,272 |
"""empty message
Revision ID: 46c97aa9285d
Revises: None
Create Date: 2015-01-20 10:34:38.095562
"""
# revision identifiers, used by Alembic.
revision = '46c97aa9285d'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('game_config',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=250), nullable=False),
sa.Column('config', sa.Unicode(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=80), nullable=False),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('first_name', sa.String(length=30), nullable=True),
sa.Column('last_name', sa.String(length=30), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
op.drop_table('users')
op.drop_table('game_config')
### end Alembic commands ###
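# Illustrative commands (assuming a configured Alembic environment; not part of
# the original migration file):
#
#   alembic upgrade 46c97aa9285d   # apply this revision
#   alembic downgrade base         # revert it (down_revision is None)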
| chudichudichudi/neuro-tedx-2 | migrations/versions/46c97aa9285d_.py | Python | bsd-3-clause | 1,843 |
import os
import subprocess
import askmanta
import json
from dateutil import parser as dateparser
from askmanta.environment import client
class Phase(object):
# phase name = "job name: phase i"
def __init__(self, i, spec, directive):
self.i = i
self.spec = spec
self.directive = directive
self.type = spec.get('type', 'map')
self.init = spec.get('init', [])
self.manifests = []
self.assets = spec.get('assets', [])
self.count = None
self.memory = None
self.disk = None
if 'sh' in spec:
self.executable = spec['sh']
elif 'script' in spec:
path = os.path.join(self.directive.manta_tmp, 'scripts', spec['script'])
self.executable = path
spec.setdefault('dependencies', {}).setdefault('scripts', []).append(spec['script'])
else:
raise ValueError()
platforms = spec.get('dependencies', False)
if platforms:
for platform, dependencies in platforms.items():
manifest = askmanta.manifest.platforms[platform](platform, self)
manifest.add(*dependencies)
self.manifests.append(manifest)
@property
def name(self):
return "{directive}: step {i}/{n}".format(
directive=self.directive.name, i=self.i, n=len(self.directive))
def serialize(self):
assets = self.assets
init = self.init
for manifest in self.manifests:
init = init + manifest.init
for store in self.directive.stores.values():
assets = assets + [store.archive_destination]
instruction = {
'type': self.type,
'init': ";\n".join(init),
'assets': assets,
'exec': self.executable,
}
for key in ['count', 'memory', 'disk']:
option = getattr(self, key)
if option:
instruction[key] = option
return instruction
class Directive(object):
def __init__(self, name, spec, root):
self.name = name
self.spec = spec
# local root
self.root = root
# manta root
self.manta_root = "/{account}/stor/directives/{name}".format(
account=client.account, name=name)
self.tmp = "/tmp/askmanta/{name}".format(name=self.name)
self.manta_tmp = "/var/tmp"
self.stores = {}
self.parse()
def parse(self):
self.phases = [Phase(i, spec, self) for i, spec in enumerate(self.spec)]
for store in self.stores.values():
if store.is_active:
instruction = "cd /var/tmp && tar xvzf {src}".format(
src=store.archive_asset)
for phase in self.phases:
phase.init.insert(0, instruction)
def serialize(self):
return [phase.serialize() for phase in self.phases]
def build(self):
for store in self.stores.values():
if store.is_active:
store.save()
phases_filename = os.path.join(self.tmp, 'phases.json')
json.dump(self.serialize(), open(phases_filename, 'w'), indent=4)
def stage(self):
# TODO: support for -f --fresh, which would client.rmr(base) first
client.mkdirp(self.manta_root)
# TODO: client.get_object(name), check `mtime` > self.store.ctime and if so, abort
# (current version already uploaded)
for store in self.stores.values():
if not store.is_active:
continue
print store.archive_destination
client.put_object(
store.archive_destination,
file=open(store.path),
durability_level='1',
)
def submit(self, inputs):
job_id = client.create_job(self.serialize(), self.name)
client.add_job_inputs(job_id, inputs)
client.end_job_input(job_id)
return Job(id=job_id)
def run(self, inputs):
self.build()
self.stage()
return self.submit(inputs=inputs)
def local_run(self, inputs):
"""
* create a virtualenv for every step, with just the packages for that step
* emulate how Joyent pipes lines into stdin
* local runs can't really work if the phases have side-effects, but if
they don't and if the input files are local too, things should work swimmingly
"""
raise NotImplementedError()
def __len__(self):
return len(self.spec)
class File(object):
def __init__(self, path):
self.path = path
@property
def content(self):
if not hasattr(self, '_content'):
self._content = client.get_object(self.path)
return self._content
def json(self):
return json.loads(self.content)
class Job(object):
# we can initialize a job with either an id
# (for an existing job) or a directive (for
# a job that's already running)
def __init__(self, id=None, directive=None):
self.id = id
self.directive = directive
self.root = os.path.join('/', client.account, 'jobs', id)
# TODO: distinguish between live and archived jobs
#self.path = os.path.join(self.root, 'live/status')
self.path = os.path.join(self.root, 'job.json')
self.errors = []
self.outputs = []
self.is_done = None
def poll(self):
self.raw = raw = File(self.path).json()
self.name = raw['name']
self.state = raw['state']
self.is_done = raw['state'] == 'done'
self.stats = raw['stats']
current = raw['stats']['tasksDone']
total = raw['stats']['tasks']
self.cursor = (current, total)
self.ctime = dateparser.parse(self.raw['timeCreated'])
if self.stats['errors']:
try:
# TODO: distinguish between live and archived jobs
# live/err => err.txt
overview_path = os.path.join(self.root, 'live/err')
overview = File(overview_path).json()
stderr_path = overview['stderr']
stderr = File(stderr_path)
self.errors.append(stderr)
except:
pass
if self.stats['outputs']:
pass
def delete(self):
# too lazy to implement this in Python...
subprocess.call(["mrm", "-r", self.root])
| debrouwere/askmanta | askmanta/job.py | Python | mit | 6,468 |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app, db, check_ui, json_contacts):
contact = json_contacts
old_contacts = db.get_contact_list()
app.contact.create(contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) + 1 == len(new_contacts)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(),
key=Contact.id_or_max)
| SentyQ/python_training | test/test_add_contact.py | Python | apache-2.0 | 662 |
import os
import shutil
from tempfile import mkstemp
from tests import TestCase, add
from mutagen.asf import ASF, ASFHeaderError, ASFValue, UNICODE, DWORD, QWORD
from mutagen.asf import BOOL, WORD, BYTEARRAY, GUID
class TASFFile(TestCase):
def test_not_my_file(self):
self.failUnlessRaises(
ASFHeaderError, ASF,
os.path.join("tests", "data", "empty.ogg"))
self.failUnlessRaises(
ASFHeaderError, ASF,
os.path.join("tests", "data", "click.mpc"))
add(TASFFile)
try: sorted
except NameError:
def sorted(l):
n = list(l)
n.sort()
return n
class TASFInfo(TestCase):
def setUp(self):
# WMA 9.1 64kbps CBR 48khz
self.wma1 = ASF(os.path.join("tests", "data", "silence-1.wma"))
# WMA 9.1 Professional 192kbps VBR 44khz
self.wma2 = ASF(os.path.join("tests", "data", "silence-2.wma"))
# WMA 9.1 Lossless 44khz
self.wma3 = ASF(os.path.join("tests", "data", "silence-3.wma"))
def test_length(self):
self.failUnlessAlmostEqual(self.wma1.info.length, 3.7, 1)
self.failUnlessAlmostEqual(self.wma2.info.length, 3.7, 1)
self.failUnlessAlmostEqual(self.wma3.info.length, 3.7, 1)
def test_bitrate(self):
self.failUnlessEqual(self.wma1.info.bitrate / 1000, 64)
self.failUnlessEqual(self.wma2.info.bitrate / 1000, 38)
self.failUnlessEqual(self.wma3.info.bitrate / 1000, 58)
def test_sample_rate(self):
self.failUnlessEqual(self.wma1.info.sample_rate, 48000)
self.failUnlessEqual(self.wma2.info.sample_rate, 44100)
self.failUnlessEqual(self.wma3.info.sample_rate, 44100)
def test_channels(self):
self.failUnlessEqual(self.wma1.info.channels, 2)
self.failUnlessEqual(self.wma2.info.channels, 2)
self.failUnlessEqual(self.wma3.info.channels, 2)
add(TASFInfo)
class TASF(TestCase):
def setUp(self):
fd, self.filename = mkstemp(suffix='wma')
os.close(fd)
shutil.copy(self.original, self.filename)
self.audio = ASF(self.filename)
def tearDown(self):
os.unlink(self.filename)
def test_pprint(self):
self.failUnless(self.audio.pprint())
def set_key(self, key, value, result=None, expected=True):
self.audio[key] = value
self.audio.save()
self.audio = ASF(self.audio.filename)
self.failUnless(key in self.audio)
self.failUnless(key in self.audio.tags)
self.failUnless(key in self.audio.tags.keys())
self.failUnless(key in self.audio.tags.as_dict().keys())
newvalue = self.audio[key]
if isinstance(newvalue, list):
for a, b in zip(sorted(newvalue), sorted(result or value)):
self.failUnlessEqual(a, b)
else:
self.failUnlessEqual(self.audio[key], result or value)
def test_contains(self):
self.failUnlessEqual("notatag" in self.audio.tags, False)
def test_inval_type(self):
self.failUnlessRaises(ValueError, ASFValue, "", 4242)
def test_repr(self):
repr(ASFValue(u"foo", UNICODE, stream=1, language=2))
def test_auto_guuid(self):
value = ASFValue('\x9eZl}\x89\xa2\xb5D\xb8\xa30\xfe', GUID)
self.set_key(u"WM/WMCollectionGroupID", value, [value])
def test_auto_unicode(self):
self.set_key(u"WM/AlbumTitle", u"foo",
[ASFValue(u"foo", UNICODE)])
def test_auto_unicode_list(self):
self.set_key(u"WM/AlbumTitle", [u"foo", u"bar"],
[ASFValue(u"foo", UNICODE), ASFValue(u"bar", UNICODE)])
def test_word(self):
self.set_key(u"WM/Track", ASFValue(24, WORD), [ASFValue(24, WORD)])
def test_auto_word(self):
self.set_key(u"WM/Track", 12,
[ASFValue(12, DWORD)])
def test_auto_word_list(self):
self.set_key(u"WM/Track", [12, 13],
[ASFValue(12, WORD), ASFValue(13, WORD)])
def test_auto_dword(self):
self.set_key(u"WM/Track", 12,
[ASFValue(12, DWORD)])
def test_auto_dword_list(self):
self.set_key(u"WM/Track", [12, 13],
[ASFValue(12, DWORD), ASFValue(13, DWORD)])
def test_auto_qword(self):
self.set_key(u"WM/Track", 12L,
[ASFValue(12, QWORD)])
def test_auto_qword_list(self):
self.set_key(u"WM/Track", [12L, 13L],
[ASFValue(12, QWORD), ASFValue(13, QWORD)])
def test_auto_bool(self):
self.set_key(u"IsVBR", True,
[ASFValue(True, BOOL)])
def test_auto_bool_list(self):
self.set_key(u"IsVBR", [True, False],
[ASFValue(True, BOOL), ASFValue(False, BOOL)])
def test_basic_tags(self):
self.set_key("Title", "Wheeee", ["Wheeee"])
self.set_key("Author", "Whoooo", ["Whoooo"])
self.set_key("Copyright", "Whaaaa", ["Whaaaa"])
self.set_key("Description", "Wii", ["Wii"])
self.set_key("Rating", "5", ["5"])
def test_stream(self):
self.audio["QL/OneHasStream"] = [
ASFValue("Whee", UNICODE, stream=2),
ASFValue("Whee", UNICODE),
]
self.audio["QL/AllHaveStream"] = [
ASFValue("Whee", UNICODE, stream=1),
ASFValue("Whee", UNICODE, stream=2),
]
self.audio["QL/NoStream"] = ASFValue("Whee", UNICODE)
self.audio.save()
self.audio = ASF(self.audio.filename)
self.failUnlessEqual(self.audio["QL/NoStream"][0].stream, None)
self.failUnlessEqual(self.audio["QL/OneHasStream"][0].stream, 2)
self.failUnlessEqual(self.audio["QL/OneHasStream"][1].stream, None)
self.failUnlessEqual(self.audio["QL/AllHaveStream"][0].stream, 1)
self.failUnlessEqual(self.audio["QL/AllHaveStream"][1].stream, 2)
def test_language(self):
self.failIf("QL/OneHasLang" in self.audio)
self.failIf("QL/AllHaveLang" in self.audio)
self.audio["QL/OneHasLang"] = [
ASFValue("Whee", UNICODE, language=2),
ASFValue("Whee", UNICODE),
]
self.audio["QL/AllHaveLang"] = [
ASFValue("Whee", UNICODE, language=1),
ASFValue("Whee", UNICODE, language=2),
]
self.audio["QL/NoLang"] = ASFValue("Whee", UNICODE)
self.audio.save()
self.audio = ASF(self.audio.filename)
self.failUnlessEqual(self.audio["QL/NoLang"][0].language, None)
self.failUnlessEqual(self.audio["QL/OneHasLang"][0].language, 2)
self.failUnlessEqual(self.audio["QL/OneHasLang"][1].language, None)
self.failUnlessEqual(self.audio["QL/AllHaveLang"][0].language, 1)
self.failUnlessEqual(self.audio["QL/AllHaveLang"][1].language, 2)
def test_lang_and_stream_mix(self):
self.audio["QL/Mix"] = [
ASFValue("Whee", UNICODE, stream=1),
ASFValue("Whee", UNICODE, language=2),
ASFValue("Whee", UNICODE, stream=3, language=4),
ASFValue("Whee", UNICODE),
]
self.audio.save()
self.audio = ASF(self.audio.filename)
self.failUnlessEqual(self.audio["QL/Mix"][0].language, None)
self.failUnlessEqual(self.audio["QL/Mix"][0].stream, 1)
self.failUnlessEqual(self.audio["QL/Mix"][1].language, 2)
self.failUnlessEqual(self.audio["QL/Mix"][1].stream, 0)
self.failUnlessEqual(self.audio["QL/Mix"][2].language, 4)
self.failUnlessEqual(self.audio["QL/Mix"][2].stream, 3)
self.failUnlessEqual(self.audio["QL/Mix"][3].language, None)
self.failUnlessEqual(self.audio["QL/Mix"][3].stream, None)
def test_data_size(self):
v = ASFValue("", UNICODE, data='4\xd8\x1e\xdd\x00\x00')
self.failUnlessEqual(v.data_size(), len(v._render()))
class TASFTags1(TASF):
original = os.path.join("tests", "data", "silence-1.wma")
add(TASFTags1)
class TASFTags2(TASF):
original = os.path.join("tests", "data", "silence-2.wma")
add(TASFTags2)
class TASFTags3(TASF):
original = os.path.join("tests", "data", "silence-3.wma")
add(TASFTags3)
class TASFIssue29(TestCase):
original = os.path.join("tests", "data", "issue_29.wma")
def setUp(self):
fd, self.filename = mkstemp(suffix='wma')
os.close(fd)
shutil.copy(self.original, self.filename)
self.audio = ASF(self.filename)
def tearDown(self):
os.unlink(self.filename)
def test_issue_29_description(self):
self.audio["Description"] = "Hello"
self.audio.save()
audio = ASF(self.filename)
self.failUnless("Description" in audio)
self.failUnlessEqual(audio["Description"], ["Hello"])
del(audio["Description"])
self.failIf("Description" in audio)
audio.save()
audio = ASF(self.filename)
self.failIf("Description" in audio)
add(TASFIssue29)
class TASFLargeValue(TestCase):
original = os.path.join("tests", "data", "silence-1.wma")
def setUp(self):
fd, self.filename = mkstemp(suffix='wma')
os.close(fd)
shutil.copy(self.original, self.filename)
def tearDown(self):
os.unlink(self.filename)
def test_save_small_bytearray(self):
audio = ASF(self.filename)
audio["QL/LargeObject"] = [ASFValue("." * 0xFFFF, BYTEARRAY)]
audio.save()
self.failIf("QL/LargeObject" not in audio.to_extended_content_description)
self.failIf("QL/LargeObject" in audio.to_metadata)
self.failIf("QL/LargeObject" in dict(audio.to_metadata_library))
def test_save_large_bytearray(self):
audio = ASF(self.filename)
audio["QL/LargeObject"] = [ASFValue("." * (0xFFFF + 1), BYTEARRAY)]
audio.save()
self.failIf("QL/LargeObject" in audio.to_extended_content_description)
self.failIf("QL/LargeObject" in audio.to_metadata)
self.failIf("QL/LargeObject" not in dict(audio.to_metadata_library))
def test_save_small_string(self):
audio = ASF(self.filename)
audio["QL/LargeObject"] = [ASFValue("." * (0x7FFF - 1), UNICODE)]
audio.save()
self.failIf("QL/LargeObject" not in audio.to_extended_content_description)
self.failIf("QL/LargeObject" in audio.to_metadata)
self.failIf("QL/LargeObject" in dict(audio.to_metadata_library))
def test_save_large_string(self):
audio = ASF(self.filename)
audio["QL/LargeObject"] = [ASFValue("." * 0x7FFF, UNICODE)]
audio.save()
self.failIf("QL/LargeObject" in audio.to_extended_content_description)
self.failIf("QL/LargeObject" in audio.to_metadata)
self.failIf("QL/LargeObject" not in dict(audio.to_metadata_library))
def test_save_guid(self):
# http://code.google.com/p/mutagen/issues/detail?id=81
audio = ASF(self.filename)
audio["QL/GuidObject"] = [ASFValue(" "*16, GUID)]
audio.save()
self.failIf("QL/GuidObject" in audio.to_extended_content_description)
self.failIf("QL/GuidObject" in audio.to_metadata)
self.failIf("QL/GuidObject" not in dict(audio.to_metadata_library))
add(TASFLargeValue)
# http://code.google.com/p/mutagen/issues/detail?id=81#c4
class TASFUpdateSize(TestCase):
original = os.path.join("tests", "data", "silence-1.wma")
def setUp(self):
fd, self.filename = mkstemp(suffix='wma')
os.close(fd)
shutil.copy(self.original, self.filename)
audio = ASF(self.filename)
audio["large_value1"] = "#"*50000
audio.save()
def tearDown(self):
os.unlink(self.filename)
def test_multiple_delete(self):
audio = ASF(self.filename)
for tag in audio.keys():
del(audio[tag])
audio.save()
add(TASFUpdateSize)
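# The tag behaviour exercised above boils down to the following typical usage
# (illustrative sketch with an assumed file path):
#
#   audio = ASF("example.wma")
#   audio["WM/AlbumTitle"] = u"Some Album"      # plain unicode is auto-wrapped
#   audio["WM/Track"] = [ASFValue(1, WORD)]     # or give an explicit ASF type
#   audio.save()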
| hanvo/MusicCloud | Crawler/Install Files/mutagen-1.22/tests/test_asf.py | Python | bsd-3-clause | 11,868 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = """Co-Pierre Georg (co-pierre.georg@uct.ac.za)"""
import abc
from xml.etree import ElementTree
# -------------------------------------------------------------------------
#
# class Config
#
# -------------------------------------------------------------------------
class BaseConfig(object):
"""
Class variables: __metaclass__, identifier, static_parameters, variable_parameters
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_identifier(self):
return
@abc.abstractmethod
def set_identifier(self, _identifier):
"""
Class variables: identifier
Local variables: _identifier
"""
if not isinstance(_identifier, str):
raise TypeError
else:
self.identifier = _identifier
return
identifier = abc.abstractproperty(get_identifier, set_identifier)
# identifier of the specific environment used for distinguishing them / logging
# identifier should be a string
@abc.abstractmethod
def get_static_parameters(self):
return
@abc.abstractmethod
def set_static_parameters(self, _params):
"""
Class variables: static_parameters
Local variables: _params
"""
if not isinstance(_params, dict):
raise TypeError
else:
self.static_parameters = _params
return
static_parameters = abc.abstractproperty(get_static_parameters, set_static_parameters)
# static parameters of the environment store parameters determining
# the behaviour of the simulation with a fixed value
# static_parameters should be a dictionary
@abc.abstractmethod
def get_variable_parameters(self):
return
@abc.abstractmethod
def set_variable_parameters(self, _params):
"""
Class variables: variable_parameters
Local variables: _params
"""
if not isinstance(_params, dict):
raise TypeError
else:
self.variable_parameters = _params
return
variable_parameters = abc.abstractproperty(get_variable_parameters, set_variable_parameters)
# variable parameters of the environment store parameters determining
# the behaviour of the simulation with a range of values
# variable_parameters should be a dictionary
@abc.abstractmethod
def add_static_parameter(self, name, value):
"""
Class variables: static_parameters
Local variables: name, value
"""
self.static_parameters[name] = value
# an abstract method for adding a static parameter to the stack of static parameters
@abc.abstractmethod
def add_variable_parameter(self, name, range_from, range_to):
"""
Class variables: variable_parameters
Local variables: name, range_from, range_to
"""
self.variable_parameters[name] = [range_from, range_to]
# an abstract method for adding a variable parameter to the stack of variable parameters
@abc.abstractmethod
def print_parameters(self):
"""
Class variables: static_parameters, variable_parameters
Local variables: key
"""
for key in self.static_parameters:
print(str(key) + ": " + str(self.static_parameters[key]))
for key in self.variable_parameters:
print(str(key) + ":" + " range: " + str(self.variable_parameters[key][0]) + "-" + str(self.variable_parameters[key][1]))
# an abstract method for printing all (static and variable) parameters
# this is for testing purposes, do not use print in production
@abc.abstractmethod
def write_environment_file(self, file_name):
out_file = open(file_name + "-check.xml", 'w')
text = self.__str__()
out_file.write(text)
out_file.close()
# an abstract method for writing a file with environment config to the current directory
@abc.abstractmethod
def __str__(self):
"""
Class variables: identifier, static_parameters, variable_parameters
Local variables: out_str, entry, value, from_value, to_value
"""
out_str = "<config identifier='" + self.identifier + "'>\n"
for entry in self.static_parameters:
value = self.static_parameters[entry]
if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
out_str = out_str + " <parameter type='static' name='" + entry + "' value='" + str(value) + "'></parameter>\n"
else:
raise TypeError
for entry in self.variable_parameters:
if isinstance(self.variable_parameters[entry], list):
from_value = self.variable_parameters[entry][0]
to_value = self.variable_parameters[entry][1]
out_str = out_str + " <parameter type='variable' name='" + entry + "' range='" + str(from_value) + "-" + \
str(to_value) + "'></parameter>\n"
else:
raise TypeError
out_str = out_str + "</config>"
return out_str
# an abstract method returning a string with environment's config
@abc.abstractmethod
def __init__(self):
"""
Class variables: identifier, static_parameters, variable_parameters
Local variables:
"""
self.identifier = ""
self.static_parameters = {}
self.variable_parameters = {}
# an abstract method for initializing the environment
@abc.abstractmethod
def read_xml_config_file(self, config_file_name):
"""
Class variables: identifier, static_parameters, variable_parameters
Local variables: xmlText, config_file_name, element, subelement, name, value, format_correct, range_from, range_to
"""
xmlText = open(config_file_name).read()
element = ElementTree.XML(xmlText)
self.identifier = element.attrib['identifier']
# loop over all entries in the xml file
for subelement in element:
name = subelement.attrib['name']
if subelement.attrib['type'] == 'static':
try: # we see whether the value is a float
value = float(subelement.attrib['value'])
except: # if not, it is a string
value = str(subelement.attrib['value'])
self.static_parameters[name] = value
if subelement.attrib['type'] == 'variable':
format_correct = True
try:
range_from = float(subelement.attrib['range'].rsplit("-")[0])
except:
format_correct = False
print("<< ERROR: range_from must be a float or int. Found: " + str(subelement.attrib['range'].rsplit("-")[0]))
try:
range_to = float(subelement.attrib['range'].rsplit("-")[1])
except:
format_correct = False
print("<< ERROR: range_to must be a float or int. Found: " + str(subelement.attrib['range'].rsplit("-")[1]))
if format_correct:
self.variable_parameters[name] = [range_from, range_to]
else:
print("<< ERROR: FOUND ERROR IN FILE " + config_file_name + ", ABORTING")
# an abstract method for reading an xml file with config
# and adding all the static and variable parameters
@abc.abstractproperty
def agents(self):
pass
# a list of all the agents, list of lists [types][instances]
@abc.abstractmethod
def agents_generator(self):
if self.agents is not None:
for agent_type in self.agents:
if type(agent_type) == list:
for agent in agent_type:
yield agent
else:
yield agent_type
else:
raise LookupError('There are no agents to iterate over.')
# a standard method for iterating over all agents
@abc.abstractmethod
def get_agent_by_id(self, ident):
to_return = None
for agent in self.agents_generator():
if agent.identifier == ident:
if to_return is None: # checks whether something has been found previously in the function
to_return = agent
else:
raise LookupError('At least two agents have the same ID.')
# if we have found something before then IDs are not unique, so we raise an error
if to_return is None:
raise LookupError('No agents have the provided ID.')
# if we don't find any agent with that ID we raise an error
else:
return to_return
# a standard method for returning an agent based on a unique ID
@abc.abstractmethod
def check_global_transaction_balance(self, type_):
sum_lists = 0 # global sum, for checking the consistency numerically
# We check all the banks first
for agent in self.agents_generator():
# Dictionaries to keep all the incoming and outgoing transactions of the bank
tranx_list_from = {}
tranx_list_to = {}
# We populate the above with the amounts
for tranx in agent.accounts:
if tranx.type_ == type_:
if tranx.from_ == agent:
if tranx.to in tranx_list_to:
tranx_list_to[tranx.to] = tranx_list_to[tranx.to] + tranx.amount
else:
tranx_list_to[tranx.to] = tranx.amount
else:
if tranx.from_ in tranx_list_from:
tranx_list_from[tranx.from_] = tranx_list_from[tranx.from_] + tranx.amount
else:
tranx_list_from[tranx.from_] = tranx.amount
# And we check if the added transactions exist in the counterparty's books
# If they do we subtract the amount from the dictionaries
# So that we can check if the dictionaries go to 0 globally
for key in tranx_list_from:
for tranx in key.accounts:
if tranx.type_ == type_:
if tranx.from_ == key:
if tranx.to == agent:
tranx_list_from[key] = tranx_list_from[key] - tranx.amount
for key in tranx_list_to:
for tranx in key.accounts:
if tranx.type_ == type_:
if tranx.to == key:
if tranx.from_ == agent:
tranx_list_to[key] = tranx_list_to[key] - tranx.amount
# Then we add the dictionary entries to the global check variable
for key in tranx_list_from:
sum_lists = sum_lists + abs(tranx_list_from[key])
for key in tranx_list_to:
sum_lists = sum_lists + abs(tranx_list_to[key])
# We make the final check and return True if consistent, otherwise return False
if sum_lists == 0:
return True
else:
return False
# a standard method for making sure the transactions of a given type
# are consistent across all agents, ie the same transaction is of the same amount
# on both agents it concerns
@abc.abstractmethod
def __getattr__(self, attr):
if (attr in self.static_parameters) and (attr in self.variable_parameters):
raise AttributeError('The same name exists in both static and variable parameters.')
else:
try:
return self.static_parameters[attr]
except:
try:
return self.variable_parameters[attr]
except:
raise AttributeError('Environment has no attribute "%s".' % attr)
# a standard method for returning attributes from the dictionaries as attributes
@abc.abstractmethod
def accrue_interests(self):
done_list = [] # This keeps the IDs of updated transactions
# The above is important as the same transactions may be on the books
# of different agents, we don't want to double count the interest
for agent in self.agents_generator(): # Iterate over all agents
for tranx in agent.accounts: # Iterate over all transactions
if tranx.identifier not in done_list: # If not amended previously
# The below adds the interest on the principal amount
tranx.amount = tranx.amount + tranx.amount * tranx.interest
# The below makes sure that we don't double count
done_list.append(tranx.identifier)
# a standard method for accruing interest on all transactions
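# An illustrative config file accepted by read_xml_config_file() above; the
# element names follow the __str__() serialization, while the identifier and
# the parameter names/values are assumed examples:
#
#   <config identifier='test_config'>
#     <parameter type='static' name='num_sweeps' value='500'></parameter>
#     <parameter type='variable' name='interest_rate' range='0.01-0.05'></parameter>
#   </config>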
| cogeorg/black_rhino | examples/solow/abm_template/src/baseconfig.py | Python | gpl-3.0 | 13,029 |
# #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module for doing parallel builds. This uses a PBS-like cluster. You should be able to submit jobs (which can have
dependencies)
Support for PBS is provided via the PbsJob class. If you want you could create other job classes and use them here.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import math
import os
import subprocess
import easybuild.tools.config as config
from easybuild.framework.easyblock import get_easyblock_instance
from easybuild.framework.easyconfig.easyconfig import ActiveMNS
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import get_repository, get_repositorypath
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.pbs_job import PbsJob, connect_to_server, disconnect_from_server, get_ppn
from easybuild.tools.repository.repository import init_repository
from vsc.utils import fancylogger
_log = fancylogger.getLogger('parallelbuild', fname=False)
def build_easyconfigs_in_parallel(build_command, easyconfigs, output_dir=None, prepare_first=True):
"""
easyconfigs is a list of easyconfigs which can be built (e.g. they have no unresolved dependencies)
this function will build them in parallel by submitting jobs
@param build_command: build command to use
@param easyconfigs: list of easyconfig files
@param output_dir: output directory
returns the jobs
"""
_log.info("going to build these easyconfigs in parallel: %s", easyconfigs)
job_ids = {}
# dependencies have already been resolved,
# so one can linearly walk over the list and use previous job id's
jobs = []
# create a single connection, and reuse it
conn = connect_to_server()
if conn is None:
_log.error("connect_to_server returned %s, can't submit jobs." % (conn))
# determine ppn once, and pass it to each job being created
# this avoids having to figure out ppn over and over again, every time creating a temp connection to the server
ppn = get_ppn()
def tokey(dep):
"""Determine key for specified dependency."""
return ActiveMNS().det_full_module_name(dep)
for ec in easyconfigs:
# this is very important, otherwise we might have race conditions
# e.g. GCC-4.5.3 finds cloog.tar.gz but it was incorrectly downloaded by GCC-4.6.3
# running this step here, prevents this
if prepare_first:
prepare_easyconfig(ec)
# the new job will only depend on already submitted jobs
_log.info("creating job for ec: %s" % str(ec))
new_job = create_job(build_command, ec, output_dir=output_dir, conn=conn, ppn=ppn)
# sometimes unresolved_deps will contain things, not needed to be build
job_deps = [job_ids[dep] for dep in map(tokey, ec['unresolved_deps']) if dep in job_ids]
new_job.add_dependencies(job_deps)
# place user hold on job to prevent it from starting too quickly,
# we might still need it in the queue to set it as a dependency for another job;
# only set hold for job without dependencies, other jobs have a dependency hold set anyway
with_hold = False
if not job_deps:
with_hold = True
# actually (try to) submit job
new_job.submit(with_hold)
_log.info("job for module %s has been submitted (job id: %s)" % (new_job.module, new_job.jobid))
# update dictionary
job_ids[new_job.module] = new_job.jobid
new_job.cleanup()
jobs.append(new_job)
# release all user holds on jobs after submission is completed
for job in jobs:
if job.has_holds():
_log.info("releasing hold on job %s" % job.jobid)
job.release_hold()
disconnect_from_server(conn)
return jobs
def submit_jobs(ordered_ecs, cmd_line_opts, testing=False):
"""
Submit jobs.
@param ordered_ecs: list of easyconfigs, in the order they should be processed
@param cmd_line_opts: list of command line options (in 'longopt=value' form)
"""
curdir = os.getcwd()
# the options to ignore (help options can't reach here)
ignore_opts = ['robot', 'job']
# generate_cmd_line returns the options in form --longopt=value
opts = [x for x in cmd_line_opts if not x.split('=')[0] in ['--%s' % y for y in ignore_opts]]
# compose string with command line options, properly quoted and with '%' characters escaped
opts_str = subprocess.list2cmdline(opts).replace('%', '%%')
command = "unset TMPDIR && cd %s && eb %%(spec)s %s --testoutput=%%(output_dir)s" % (curdir, opts_str)
_log.info("Command template for jobs: %s" % command)
job_info_lines = []
if testing:
_log.debug("Skipping actual submission of jobs since testing mode is enabled")
else:
jobs = build_easyconfigs_in_parallel(command, ordered_ecs)
job_info_lines = ["List of submitted jobs:"]
job_info_lines.extend(["%s (%s): %s" % (job.name, job.module, job.jobid) for job in jobs])
job_info_lines.append("(%d jobs submitted)" % len(jobs))
return '\n'.join(job_info_lines)
def create_job(build_command, easyconfig, output_dir=None, conn=None, ppn=None):
"""
Creates a job, to build a *single* easyconfig
@param build_command: format string for command, full path to an easyconfig file will be substituted in it
@param easyconfig: easyconfig as processed by process_easyconfig
@param output_dir: optional output path; --regtest-output-dir will be used inside the job with this variable
@param conn: open connection to PBS server
@param ppn: ppn setting to use (# 'processors' (cores) per node to use)
returns the job
"""
if output_dir is None:
output_dir = 'easybuild-build'
# capture PYTHONPATH, MODULEPATH and all variables starting with EASYBUILD
easybuild_vars = {}
for name in os.environ:
if name.startswith("EASYBUILD"):
easybuild_vars[name] = os.environ[name]
others = ["PYTHONPATH", "MODULEPATH"]
for env_var in others:
if env_var in os.environ:
easybuild_vars[env_var] = os.environ[env_var]
_log.info("Dictionary of environment variables passed to job: %s" % easybuild_vars)
# obtain unique name based on name/easyconfig version tuple
ec_tuple = (easyconfig['ec']['name'], det_full_ec_version(easyconfig['ec']))
name = '-'.join(ec_tuple)
# create command based on build_command template
command = build_command % {
'spec': easyconfig['spec'],
'output_dir': os.path.join(os.path.abspath(output_dir), name),
}
# just use latest build stats
repo = init_repository(get_repository(), get_repositorypath())
buildstats = repo.get_buildstats(*ec_tuple)
resources = {}
if buildstats:
previous_time = buildstats[-1]['build_time']
resources['hours'] = int(math.ceil(previous_time * 2 / 60))
job = PbsJob(command, name, easybuild_vars, resources=resources, conn=conn, ppn=ppn)
job.module = easyconfig['ec'].full_mod_name
return job
def prepare_easyconfig(ec):
"""
Prepare for building specified easyconfig (fetch sources)
@param ec: parsed easyconfig (EasyConfig instance)
"""
try:
easyblock_instance = get_easyblock_instance(ec)
easyblock_instance.update_config_template_run_step()
easyblock_instance.fetch_step(skip_checksums=True)
_log.debug("Cleaning up log file %s..." % easyblock_instance.logfile)
easyblock_instance.close_log()
os.remove(easyblock_instance.logfile)
except (OSError, EasyBuildError), err:
_log.error("An error occured while preparing %s: %s" % (ec, err))
| pneerincx/easybuild-framework | easybuild/tools/parallelbuild.py | Python | gpl-2.0 | 8,870 |
# -*- python -*-
# Copyright (C) 2009 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/opt/codesourcery/arm-none-eabi/share/gcc-4.5.1/python'
libdir = '/opt/codesourcery/arm-none-eabi/lib/thumb2'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir in sys.path:
sys.path.insert(0, dir)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| GeeteshKhatavkar/gh0st_kernel_samsung_royxx | arm-2010.09/arm-none-eabi/lib/thumb2/libstdc++.a-gdb.py | Python | gpl-2.0 | 2,345 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"This is some interesting educational program"
# wxRays (C) 2013 Serhii Lysovenko
#
# This program is free software; you can redistribute it and / or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.
# Used to guarantee to use at least Wx2.8
import wxversion
import os.path as osp
from sys import argv
wxversion.ensureMinimal('2.8')
import wx
class MainFrame(wx.Frame):
'The Main Frame'
def __init__(self):
last_size = eval(APP_SETT.get("frame_size", "(550, 350)"))
wx.Frame.__init__(self, None, - 1, PROG_NAME, size=last_size)
from v_plot import Plot
from v_menu import Active_menu
from c_file import ascii_file_load
self.data = {}
adb = {'data': self.data, 'window': self}
self.addons_data = {' base ': adb}
self.a_menu = Active_menu(self)
self.plot = Plot(self.a_menu)
self.prev_dir = '.'
adb.update({'plot': self.plot, 'menu': self.a_menu, 'loaders': []})
APP_SETT.addons.introduce(self.addons_data)
adb['loaders'].insert(0, (_('Commented dat files'), ('.dat',),
ascii_file_load))
self.a_menu.set_menu_bar()
self.plot.mk_menuitems()
self.canvas = self.plot.get_canvas(self)
self.SetToolBar(self.plot.get_toolbar())
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_CLOSE(self, self.OnWindowClose)
wx.EVT_SIZE(self, self.sizeHandler)
self.a_menu.action_catch("on init")
self.SetSizer(self.sizer)
self.statusBar = wx.StatusBar(self, -1)
self.statusBar.SetFieldsCount(1)
self.SetStatusBar(self.statusBar)
self.plot.set_statusbar(self.statusBar)
def OnCreate(self):
if len(argv) > 1:
from v_dialogs import load_data_file
load_data_file(self, argv[1:],
self.addons_data[' base ']['loaders'])
def OnPaint(self, event):
self.canvas.draw()
event.Skip()
def sizeHandler(self, event):
self.canvas.SetInitialSize(self.GetClientSize())
event.Skip()
def OnWindowClose(self, event):
APP_SETT.addons.terminate(self.addons_data, True)
APP_SETT.set("frame_size", repr(self.GetSizeTuple()))
self.Destroy()
def OnDataFile(self, event):
from v_dialogs import load_data_file
load_data_file(self, None, self.addons_data[' base ']['loaders'])
def AboutMe(self, evt):
from v_dialogs import about_box
about_box()
class TheSplashScreen(wx.SplashScreen):
def __init__(self):
splash_fname = osp.join(osp.dirname(__file__), u'splash.png')
image = wx.Image(splash_fname, wx.BITMAP_TYPE_PNG)
bmp = image.ConvertToBitmap()
self.bmp = image.Resize((200, 200), (0, 0)).ConvertToBitmap()
wx.SplashScreen.__init__(self, bmp, wx.SPLASH_CENTRE_ON_SCREEN |
wx.SPLASH_TIMEOUT, 5000, None, - 1)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.fc = wx.FutureCall(2000, self.ShowMain)
def OnClose(self, evt):
# Make sure the default handler runs too so this window gets
# destroyed
evt.Skip()
self.Hide()
# if the timer is still running then go ahead and show the
# main frame now
if self.fc.IsRunning():
self.fc.Stop()
self.ShowMain()
def ShowMain(self):
import settings
settings.prog_init()
frame = MainFrame()
frame.SetIcon(wx.IconFromBitmap(self.bmp))
self.Hide()
frame.OnCreate()
frame.Show()
class App(wx.App):
def OnInit(self):
splash = TheSplashScreen()
splash.Show()
return True
def main():
app = App(False)
app.MainLoop()
APP_SETT.save()
| Lysovenko/wxRays | v_face.py | Python | gpl-3.0 | 4,571 |
import pytest
import numpy as np
from eli5._feature_names import FeatureNames
# See also test_sklearn_utils.py::test_get_feature_names
def test_feature_names_filtered():
filtered, indices = (FeatureNames(['one', 'two', 'twenty-two'])
.filtered(lambda name: 'two' in name))
assert indices == [1, 2]
assert list(filtered) == ['two', 'twenty-two']
filtered, indices = (
FeatureNames({1: 'two', 3: 'twenty-two', 5: 'two-thirds'}, unkn_template='%d',
n_features=6, bias_name='foo')
.filtered(lambda name: name.startswith('two')))
assert indices == [1, 5]
assert filtered.bias_name is None
assert filtered.unkn_template == '%d'
assert list(filtered) == ['two', 'two-thirds']
filtered, indices = (FeatureNames(['a', 'b'], bias_name='bias')
.filtered(lambda name: 'b' in name))
assert indices == [1, 2]
assert filtered.bias_name == 'bias'
assert list(filtered) == ['b', 'bias']
filtered, indices = (FeatureNames(unkn_template='x%d', n_features=6)
.filtered(lambda name: False))
assert indices == []
filtered, indices = (
FeatureNames(['one', 'two', 'twenty-two'])
.filtered(lambda name, value: 't' in name and value <= 1,
x=[0, 1, 2]))
assert indices == [1]
assert list(filtered) == ['two']
def test_feature_names_handle_filter():
filtered, indices = (FeatureNames(['one', 'two', 'twenty-two'])
.handle_filter(lambda name: 'two' in name, feature_re=None))
assert indices == [1, 2]
assert list(filtered) == ['two', 'twenty-two']
filtered, indices = (FeatureNames(['one', 'two', 'twenty-two'])
.handle_filter(feature_filter=None, feature_re='two'))
assert indices == [1, 2]
assert list(filtered) == ['two', 'twenty-two']
filtered, indices = FeatureNames(['one', 'two']).handle_filter(None, None)
assert indices is None
assert list(filtered) == ['one', 'two']
with pytest.raises(ValueError):
FeatureNames(['one', 'two']).handle_filter(lambda name: True, '.*')
def test_init():
with pytest.raises(ValueError):
FeatureNames()
with pytest.raises(ValueError):
FeatureNames(unkn_template='%d')
with pytest.raises(ValueError):
FeatureNames(n_features=10)
with pytest.raises(ValueError):
FeatureNames(['a'], n_features=10)
with pytest.raises(TypeError):
FeatureNames({'a', 'b'})
with pytest.raises(ValueError):
FeatureNames({0: 'a', 1: 'b'}, n_features=10)
FeatureNames(unkn_template='%d', n_features=10)
FeatureNames(['a', 'b'])
FeatureNames({0: 'a', 1: 'b'})
FeatureNames({0: 'a', 1: 'b'}, n_features=10, unkn_template='x%d')
def test_slice():
FN = FeatureNames
assert FN(['one', 'two', 'three'])[1:] == ['two', 'three']
assert FN(['one', 'two', 'three'])[:-2] == ['one']
assert FN(['one', 'two', 'three'])[1:] == ['two', 'three']
assert FN({1: 'one'}, n_features=3, unkn_template='x%d')[:] \
== ['x0', 'one', 'x2']
assert FN({1: 'one'}, n_features=3, unkn_template='x%d',
bias_name='bias')[-3:] \
== ['one', 'x2', 'bias']
assert FN(['one', 'two', 'three'], bias_name='bias')[-1:] == ['bias']
assert FN(np.array(['one', 'two', 'three']), bias_name='bias')[-1:] \
== ['bias']
assert FN(np.array(['one', 'two', 'three']), bias_name='bias')[-2:] \
== ['three', 'bias']
assert list(FN(np.array(['one', 'two', 'three']))[-2:]) == ['two', 'three']
@pytest.mark.parametrize(
['feature_names'], [
[FeatureNames(['x1', 'x2', 'x3'])],
[FeatureNames(['x1', 'x2', 'x3'], bias_name='<BIAS>')],
[FeatureNames(np.array(['x1', 'x2', 'x3']))],
[FeatureNames({0: 'x1', 1: 'x2'})],
[FeatureNames(n_features=5, unkn_template='%d')],
])
def test_add_feature(feature_names):
len_before = len(feature_names)
storage = feature_names.feature_names
new_feature = 'new'
new_idx = feature_names.add_feature(new_feature)
assert len(feature_names) == len_before + 1
assert feature_names[new_idx] == new_feature
if storage is not None:
assert storage is not feature_names.feature_names
| TeamHG-Memex/eli5 | tests/test_feature_names.py | Python | mit | 4,329 |
#!/usr/bin/env python3
import os,sys;
from setuptools import setup;
if 'linux'!=sys.platform:
print("This package only supports Linux platform.");
exit(1);
setup(
    name = 'telnet-ppp-server', # project name shown in pip
version = '0.1',
author = 'Frank',
author_email = '',
license = 'MIT',
url = '',
description = 'PPP server and Telnet server for old platforms like Windows 3.x, Windows 95.',
python_requires = '>=3.5.0',
py_modules=['serverLib'],
scripts=['pppd.py','telnetd.py','raw-mode']
);
| frank-deng/retro-works | telnet-ppp-server/setup.py | Python | mit | 552 |
from flask import Blueprint, render_template, redirect, request, g, abort
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from career_connections.database.models import PendingUser, User
from career_connections.utils.auth import login, logout
unauth_views_bp = Blueprint('unauthenticated_views', __name__)
class SigninForm(FlaskForm):
username = StringField('username')
password = PasswordField('password')
class SignupForm(FlaskForm):
first_name = StringField('First Name')
last_name = StringField('Last Name')
email = StringField('Email')
@unauth_views_bp.route('/')
def home():
signin_form = SigninForm()
signup_form = SignupForm()
return render_template('signin.j2', login_form=signin_form, signup_form=signup_form)
@unauth_views_bp.route('/signin', methods=['POST'])
def signin():
form = SigninForm()
if form.username.data is None or form.password.data is None:
return redirect('/?signin=invalid')
if login(form.username.data, form.password.data):
return redirect('/dashboard')
return redirect('/?signin=invalid')
@unauth_views_bp.route('/signup', methods=['POST'])
def signup():
form = SignupForm()
pending_user = PendingUser(
first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data)
print(pending_user.to_dict())
g.db.add(pending_user)
g.db.commit()
    return redirect('/?signup=success')
@unauth_views_bp.route('/signout')
def signout():
logout()
return redirect('/')
| UnboundLegacy/api | career_connections/blueprints/unauthenticated_views.py | Python | mit | 1,567 |
# -*- coding: utf-8 -*-
'''
Created : 2017-01-14
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate, InlineImage
# for height and width you have to use millimeters (Mm), inches or points(Pt) class :
from docx.shared import Mm, Inches, Pt
tpl=DocxTemplate('test_files/inline_image_tpl.docx')
context = {
'myimage' : InlineImage(tpl,'test_files/python_logo.png',width=Mm(20)),
'myimageratio': InlineImage(tpl, 'test_files/python_jpeg.jpg', width=Mm(30), height=Mm(60)),
'frameworks' : [{'image' : InlineImage(tpl,'test_files/django.png',height=Mm(10)),
'desc' : 'The web framework for perfectionists with deadlines'},
{'image' : InlineImage(tpl,'test_files/zope.png',height=Mm(10)),
'desc' : 'Zope is a leading Open Source Application Server and Content Management Framework'},
{'image': InlineImage(tpl, 'test_files/pyramid.png', height=Mm(10)),
'desc': 'Pyramid is a lightweight Python web framework aimed at taking small web apps into big web apps.'},
{'image' : InlineImage(tpl,'test_files/bottle.png',height=Mm(10)),
'desc' : 'Bottle is a fast, simple and lightweight WSGI micro web-framework for Python'},
{'image': InlineImage(tpl, 'test_files/tornado.png', height=Mm(10)),
'desc': 'Tornado is a Python web framework and asynchronous networking library.'},
]
}
tpl.render(context)
tpl.save('test_files/inline_image.docx')
| rgusmero/python-docx-template | tests/inline_image.py | Python | lgpl-2.1 | 1,565 |
#!/usr/bin/python3
# Concatenator is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Concatenator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
def concatenate(Dicts, fileformat):
#Concatenates multiple Dicts into a single one
seqset = set()
for Dict in Dicts:
seqset.add(tuple(sorted(Dict)))
    if len(seqset) == 1:
        return "OK"
    else:
        return "Different taxa"
| StuntsPT/concatenator2 | concatenation.py | Python | gpl-3.0 | 945 |
import tangelo
tangelo.log("Python file plugin")
tangelo.log("This doesn't use a Python module directory.")
| Kitware/tangelo | tests/plugins/pythonfile/python.py | Python | apache-2.0 | 109 |
import sure
sure
| gabrielfalcao/carpentry | tests/unit/__init__.py | Python | gpl-3.0 | 17 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""A self-tuning trainer that produces a model for tabular datasets."""
from typing import List, Optional
from nitroml import subpipeline
from tfx import components as tfx
from tfx import types
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import executor_spec
from tfx.proto import trainer_pb2
from google.protobuf import text_format
from nitroml.protos import problem_statement_pb2 as ps_pb2
class AutoTrainer(subpipeline.Subpipeline):
"""A self-tuning trainer that produces a model for tabular datasets.
It is designed to be used in conjunction with NitroML's `AutoData` subpipeline
using the `BasicPreprocessor`.
## Example:
```python
task = MyBenchmarkTask(...)
autodata = AutoData(
task.problem_statement,
examples=task.train_and_eval_examples,
preprocessor=BasicPreprocessor())
autotrainer= AutoTrainer(
problem_statement=task.problem_statement,
transformed_examples=autodata.transformed_examples,
transform_graph=autodata.transform_graph,
schema=autodata.schema,
train_steps=1000,
eval_steps=1000)
pipeline = task.components + autodata.components + autotrainer.components
```
"""
def __init__(self,
problem_statement: ps_pb2.ProblemStatement,
transformed_examples: types.Channel,
transform_graph: types.Channel,
schema: types.Channel,
train_steps: int,
eval_steps: int,
use_keras: bool = True,
enable_tuning: bool = False,
max_sequence_length: Optional[int] = None,
instance_name: Optional[str] = None):
"""Constructs an AutoTrainer subpipeline.
Args:
problem_statement: ProblemStatement proto identifying the task.
transformed_examples: A Channel of 'ExamplesPath' type produced from an
upstream Transform component. The source of examples that are used in
training and evaluation (required).
transform_graph: An optional Channel of 'TransformPath' type, serving as
the input transform graph if present.
schema: An optional Channel of 'SchemaPath' type, serving as the schema
of training and eval data.
train_steps: Number of steps (batches) to train for.
eval_steps: Number of steps (batches) to evaluate.
use_keras: When `True`, uses Keras Models, otherwise uses Estimators.
enable_tuning: When `True`, performs hyperparameter tuning using the
built-in `tfx.Tuner` using a tuned search-space.
max_sequence_length: For seqential prediction tasks. When > 0, the
trainer will produce a model that will produce sequential prediction of
this desired length.
instance_name: Optional unique instance name. Necessary iff multiple Tuner
components are declared in the same pipeline.
Raises:
ValueError: When a required param is not supplied.
"""
self._instance_name = instance_name
self._tuner = None
if enable_tuning:
# Search over search space of model hyperparameters.
self._tuner = tfx.Tuner(
tuner_fn='nitroml.automl.autotrainer.lib.auto_trainer.tuner_fn',
examples=transformed_examples,
transform_graph=transform_graph,
train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
eval_args=trainer_pb2.EvalArgs(num_steps=eval_steps),
custom_config={
# Pass the problem statement proto as a text proto. Required
# since custom_config must be JSON-serializable.
'problem_statement':
text_format.MessageToString(
message=problem_statement, as_utf8=True),
},
instance_name=self.id)
self._trainer = tfx.Trainer(
run_fn='nitroml.automl.autotrainer.lib.auto_trainer.run_fn' if use_keras
else 'nitroml.automl.autotrainer.lib.auto_estimator_trainer.run_fn',
custom_executor_spec=(executor_spec.ExecutorClassSpec(
trainer_executor.GenericExecutor)),
transformed_examples=transformed_examples,
transform_graph=transform_graph,
schema=schema,
train_args=trainer_pb2.TrainArgs(num_steps=train_steps),
eval_args=trainer_pb2.EvalArgs(num_steps=eval_steps),
hyperparameters=self._tuner.outputs.best_hyperparameters
if self._tuner else None,
custom_config={
# Pass the problem statement proto as a text proto. Required
# since custom_config must be JSON-serializable.
'problem_statement':
text_format.MessageToString(
message=problem_statement, as_utf8=True),
'sequence_length':
max_sequence_length,
},
instance_name=self.id)
@property
def id(self) -> str:
"""Returns the AutoTrainer sub-pipeline's unique ID."""
autotrainer_instance_name = 'AutoTrainer'
if self._instance_name:
autotrainer_instance_name = f'{autotrainer_instance_name}.{self._instance_name}'
return autotrainer_instance_name
@property
def components(self) -> List[base_component.BaseComponent]:
"""Returns the AutoTrainer sub-pipeline's constituent components."""
return ([self._tuner] if self._tuner else []) + [self._trainer]
@property
def outputs(self) -> subpipeline.SubpipelineOutputs:
"""Return the AutoTrainer sub-pipeline's outputs."""
return subpipeline.SubpipelineOutputs({
'model':
self._trainer.outputs.model,
'best_hyperparameters':
self._tuner.outputs.best_hyperparameters if self._tuner else None,
})
| google/nitroml | nitroml/automl/autotrainer/subpipeline.py | Python | apache-2.0 | 6,444 |
import django
from media_tree.models import FileNode
from media_tree.admin.utils import get_current_request, is_search_request, \
get_request_attr
from django.contrib.admin.views.main import ChangeList
from django.db import models
class MediaTreeChangeList(ChangeList):
def is_filtered(self, request):
return is_search_request(request) or self.params
def __init__(self, request, *args, **kwargs):
super(MediaTreeChangeList, self).__init__(request, *args, **kwargs)
# self.parent_folder is set in get_queryset()
self.title = self.parent_folder.name if self.parent_folder else FileNode.get_top_node().name
# TODO: Move filtering by open folders here
def get_queryset(self, request=None):
# request arg was added in django r16144 (after 1.3)
if request is not None and django.VERSION >= (1, 4):
            qs = super(MediaTreeChangeList, self).get_queryset(request)
        else:
            qs = super(MediaTreeChangeList, self).get_queryset()
request = get_current_request()
# Pagination should be disabled by default, since it interferes
# with expanded folders and might display them partially.
# However, filtered results are presented as a flat list and
# should be paginated.
pagination_enabled = self.is_filtered(request)
if not pagination_enabled:
self.show_all = True
# filter by currently expanded folders if list is not filtered by extension or media_type
self.parent_folder = self.model_admin.get_parent_folder(request)
if self.parent_folder and not pagination_enabled:
if self.parent_folder.is_top_node():
expanded_folders_pk = self.model_admin.get_expanded_folders_pk(request)
if expanded_folders_pk:
qs = qs.filter(models.Q(parent=None) | models.Q(parent__pk__in=expanded_folders_pk))
else:
qs = qs.filter(parent=None)
else:
qs = qs.filter(parent=self.parent_folder)
if request is not None and self.is_filtered(request):
return qs.order_by('name')
else:
# always order by (tree_id, left)
tree_id = qs.model._mptt_meta.tree_id_attr
left = qs.model._mptt_meta.left_attr
return qs.order_by(tree_id, left)
def get_results(self, request):
"""
Temporarily decreases the `level` attribute of all search results in
order to prevent indendation when displaying them.
"""
super(MediaTreeChangeList, self).get_results(request)
try:
reduce_levels = abs(int(get_request_attr(request, 'reduce_levels', 0)))
except TypeError:
reduce_levels = 0
is_filtered = self.is_filtered(request)
if is_filtered or reduce_levels:
for item in self.result_list:
item.prevent_save()
item.actual_level = item.level
if is_filtered:
item.reduce_levels = item.level
item.level = 0
else:
item.reduce_levels = reduce_levels
item.level = max(0, item.level - reduce_levels)
| samluescher/django-media-tree | media_tree/admin/change_list.py | Python | bsd-3-clause | 3,274 |
#!/usr/bin/env python
from optparse import OptionParser
from shutil import which
from subprocess import check_output, CalledProcessError
from sys import stderr
from flask import Flask, jsonify
from flask_sockets import Sockets
def get_parser():
parser = OptionParser(usage='%prog [-p|--port PORT] [-w|--websockets]')
    parser.add_option('-p', '--port', type='int',
                      help='Port for X11Remote to listen on (default: %default)')
parser.add_option('-w', '--websockets', action='store_true',
help='Enable websocket support')
parser.set_defaults(port=1234)
return parser
def missing_tool_exit(*args):
for tool in args:
if not which(tool):
print(tool+' not found! Please add a '+tool+' binary to your PATH!',
                  file=stderr)
exit(1)
def xdotool(*args):
args = list(args)
if args[0]=='mousemove_relative':
args.insert(1, '--')
args.insert(0, 'xdotool')
try:
return check_output(args).decode('utf-8')
except CalledProcessError as e:
return e.output.decode('utf-8')
app = Flask(__name__, static_url_path='')
sockets = Sockets(app)
@sockets.route('/')
def handle_socket(ws):
while not ws.closed:
print(ws.receive())
@app.route("/keymap.json")
def get_xmodmap():
dic = {}
lines = str(check_output(['xmodmap','-pke'])).split('\\n')
for l in lines:
ls = l.split('=')
if len(ls) != 2 or len(ls[1]) < 1:
continue
dic[ls[0].split()[1]] = ls[1].split()
return jsonify(dic), {'Content-Type': 'text/json; charset=utf-8'}
@app.route("/exec/<string:cmds>")
def exec(cmds):
for cmd in cmds.split('|'):
xdotool(*cmd.split())
return "ACK"
@app.route("/")
def serve():
return app.send_static_file('index.html')
if __name__ == '__main__':
opts,args = get_parser().parse_args()
missing_tool_exit('xdotool','xmodmap')
from gevent.pywsgi import WSGIServer, WSGIHandler
if opts.websockets:
from geventwebsocket.handler import WebSocketHandler
http_server = WSGIServer(('', opts.port), app,
handler_class=WebSocketHandler if opts.websockets else WSGIHandler)
http_server.serve_forever()
| apirogov/x11remote | x11remote.py | Python | mit | 2,220 |
#!/usr/bin/env python
# coding=utf-8
#-*- coding:utf-8 -*-
import random
N = 8 # eight web pages
d = 0.85 # damping factor of 0.85
delt = 0.00001 # convergence threshold for the iteration
#multiply two matrices
def matrix_multi(A,B):
result = [[0]*len(B[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
result[i][j] += A[i][k]*B[k][j]
return result
#multiply every element of matrix A by n
def matrix_multiN(n,A):
result = [[1]*len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
result[i][j] = n*A[i][j]
return result
#add two matrices
def matrix_add(A,B):
if len(A[0])!=len(B[0]) and len(A)!=len(B):
return
result = [[0]*len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
result[i][j] = A[i][j]+B[i][j]
return result
def pageRank(A):
e = []
for i in range(N):
e.append(1)
norm = 100
New_P = []
for i in range(N):
New_P.append([random.random()])
r = [ [(1-d)*i*1/N] for i in e]
while norm > delt:
P = New_P
        New_P = matrix_add(r,matrix_multiN(d,matrix_multi(A,P))) # P=(1-d)*e/n+d*M'P, the core PageRank update
norm = 0
        #compute the L1 norm of the change between iterations
for i in range(N):
norm += abs(New_P[i][0]-P[i][0])
print New_P
#build the transition probability matrix from the adjacency matrix and transpose it
def tran_and_convert(A):
result = [[0]*len(A[0]) for i in range(len(A))]
result_convert = [[0]*len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A[0])):
result[i][j] = A[i][j]*1.0/sum(A[i])
for i in range(len(result)):
for j in range(len(result[0])):
result_convert[i][j]=result[j][i]
return result_convert
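# Worked illustration (made-up 2x2 adjacency matrix): for A = [[0, 1], [1, 1]]
# the row-normalised transition matrix is [[0, 1], [0.5, 0.5]], and transposing
# it gives M' = [[0, 0.5], [1, 0.5]], which is the form pageRank() expects.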
def main():
A = [[0,1,1,0,0,1,0,0],\
[0,0,0,1,1,0,0,0],\
[0,0,0,1,0,1,0,0],\
[0,0,0,0,0,1,0,0],\
[1,0,0,1,0,0,1,1],\
[0,0,0,1,0,0,0,0],\
[0,0,1,0,0,0,0,0],\
[0,0,0,1,0,0,1,0]]
M = tran_and_convert(A)
pageRank(M)
if __name__ == '__main__':
main()
| zhaochl/python-utils | agrith_util/page_rank/page_rank_test.py | Python | apache-2.0 | 2,180 |
import os
import re
from honcho.procfile import Procfile
from subprocess import Popen, PIPE
from nose.tools import assert_equal, assert_true # noqa
from mock import patch, MagicMock, call # noqa
FIXTURE_ROOT = os.path.join(os.path.dirname(__file__), 'fixtures')
try:
from nose.tools import assert_regexp_matches
except ImportError:
def assert_regexp_matches(text, expected_regexp, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regexp, str):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(text):
msg = msg or "Regexp didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
raise AssertionError(msg)
def assert_regexp_fails(text, failed_regexp, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(failed_regexp, str):
failed_regexp = re.compile(failed_regexp)
if failed_regexp.search(text):
msg = msg or "Regexp matched"
msg = '%s: %r found in %r' % (msg, failed_regexp.pattern, text)
raise AssertionError(msg)
def get_honcho_output(args):
os.chdir(FIXTURE_ROOT)
cmd = ['honcho']
cmd.extend(args)
# The below is mostly copy-pasted from subprocess.py's check_output (to
# support python 2.6)
process = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
output, error = process.communicate()
retcode = process.returncode
return retcode, output, error
def get_procfile(name):
with open(os.path.join(FIXTURE_ROOT, name)) as f:
return Procfile(f.read())
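# Sketch of intended use (hypothetical fixture name and arguments):
#   retcode, output, error = get_honcho_output(['-f', 'Procfile.simple', 'check'])
#   procfile = get_procfile('Procfile.simple')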
| gratipay/honcho | test/helpers.py | Python | mit | 1,694 |
from rest_framework import routers, serializers, viewsets, mixins, filters, relations
from munigeo.api import GeoModelSerializer
from rest_framework.serializers import ListSerializer, LIST_SERIALIZER_KWARGS
import datetime
from .models import *
import django_filters
from django import forms
from rest_framework.exceptions import ParseError
YEARS_OF_PRIVACY = 100
# for censoring principals in related instances
class CensoredManyRelatedField(relations.ManyRelatedField):
"""
Handles view permissions for related field listings with Principal or Employership instances.
"""
def to_representation(self, iterable):
if iterable.model is Employership:
iterable = iterable.filter(end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY)
        if iterable.model is Principal:
            iterable = iterable.filter(employers__end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY)
return super().to_representation(iterable)
class CensoredListSerializer(serializers.ListSerializer):
"""
Handles view permissions for list serializers with Principal or Employership instances.
"""
def to_representation(self, data):
"""
List of object instances -> List of dicts of primitive datatypes.
"""
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, models.Manager) else data
if iterable.model is Employership:
iterable = iterable.filter(end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY)
if iterable.model is Principal:
iterable = iterable.filter(employers__end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY)
return [
self.child.to_representation(item) for item in iterable
]
class CensoredHyperlinkedRelatedField(relations.HyperlinkedRelatedField):
"""
Handles view permissions for related field listings with Principal or Employership instances.
"""
@classmethod
def many_init(cls, *args, **kwargs):
# the correct arguments must be passed on to the parent
list_kwargs = {'child_relation': cls(*args, **kwargs)}
for key in kwargs.keys():
if key in relations.MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return CensoredManyRelatedField(**list_kwargs)
class CensoredHyperlinkedModelSerializer(serializers.HyperlinkedModelSerializer):
"""
Handles view permissions for related field listings with Principal or Employership instances.
"""
serializer_related_field = CensoredHyperlinkedRelatedField
# the actual serializers
class SchoolNameSerializer(serializers.ModelSerializer):
official_name = serializers.CharField(allow_null=True, source='get_official_name')
other_names = serializers.ListField(
source='get_other_names',
child=serializers.DictField(child=serializers.CharField())
)
class Meta:
model = SchoolName
exclude = ('school',)
class SchoolLanguageSerializer(serializers.ModelSerializer):
language = serializers.CharField(source='language.name')
class Meta:
model = SchoolLanguage
exclude = ('school',)
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = '__all__'
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Language.objects.all()
serializer_class = LanguageSerializer
class SchoolTypeNameSerializer(serializers.ModelSerializer):
class Meta:
model = SchoolTypeName
fields = '__all__'
class SchoolTypeNameViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SchoolTypeName.objects.all()
serializer_class = SchoolTypeNameSerializer
paginate_by = 50
class SchoolTypeSerializer(serializers.ModelSerializer):
type = SchoolTypeNameSerializer()
class Meta:
model = SchoolType
exclude = ('school',)
class SchoolFieldNameSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='description')
class Meta:
model = SchoolFieldName
exclude = ('description',)
class SchoolFieldNameViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SchoolFieldName.objects.all()
serializer_class = SchoolFieldNameSerializer
class SchoolFieldSerializer(serializers.ModelSerializer):
field = SchoolFieldNameSerializer()
class Meta:
model = SchoolField
exclude = ('school',)
class SchoolGenderSerializer(serializers.ModelSerializer):
class Meta:
model = SchoolGender
exclude = ('school',)
class SchoolNumberOfGradesSerializer(serializers.ModelSerializer):
class Meta:
model = NumberOfGrades
exclude = ('school',)
class NeighborhoodSerializer(serializers.ModelSerializer):
class Meta:
model = Neighborhood
fields = '__all__'
class AddressLocationSerializer(GeoModelSerializer):
class Meta:
model = AddressLocation
exclude = ('id', 'address')
class AddressSerializer(serializers.ModelSerializer):
location = AddressLocationSerializer(required=False)
def to_representation(self, obj):
ret = super(AddressSerializer, self).to_representation(obj)
if ret['location']:
ret['location'] = ret['location']['location']
return ret
class Meta:
model = Address
fields = '__all__'
class DataTypeSerializer(serializers.ModelSerializer):
class Meta:
model = DataType
fields = '__all__'
class ArchiveDataSerializer(serializers.ModelSerializer):
url = serializers.URLField(source='link.url')
data_type = DataTypeSerializer()
class Meta:
model = ArchiveData
exclude = ('id',)
class OwnerFounderSerializer(serializers.ModelSerializer):
type = serializers.CharField(source='type.description')
class Meta:
model = OwnerFounder
fields = '__all__'
class SchoolOwnershipSerializer(serializers.ModelSerializer):
owner = OwnerFounderSerializer()
class Meta:
model = SchoolOwnership
exclude = ('school',)
class SchoolFounderSerializer(serializers.ModelSerializer):
founder = OwnerFounderSerializer()
class Meta:
model = SchoolFounder
exclude = ('school',)
class BuildingOwnershipSerializer(serializers.ModelSerializer):
owner = OwnerFounderSerializer()
class Meta:
model = BuildingOwnership
exclude = ('building',)
class SchoolBuildingPhotoSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
# we have to reformat the URL representation so that our API serves the corresponding photo URL
# this method will have to be updated whenever Finna API changes!
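        # e.g. (illustrative, made-up record id): "https://hkm.finna.fi/Record/hkm.ABC123"
        # becomes "https://hkm.finna.fi/Cover/Show?id=hkm.ABC123&w=1200&h=1200"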
representation = super(SchoolBuildingPhotoSerializer, self).to_representation(instance)
representation['url'] = representation['url'].replace(
'.finna.fi/Record/',
'.finna.fi/Cover/Show?id='
) + '&w=1200&h=1200'
return representation
class Meta:
model = SchoolBuildingPhoto
exclude = ('school_building',)
class BuildingForSchoolSerializer(serializers.ModelSerializer):
neighborhood = serializers.CharField(source='neighborhood.name')
addresses = AddressSerializer(many=True)
owners = BuildingOwnershipSerializer(many=True)
photos = serializers.ListField(
source='get_photos',
child=SchoolBuildingPhotoSerializer()
)
class Meta:
model = Building
# fields must be declared here to get both id and url
fields = ('url', 'id', 'neighborhood', 'addresses', 'construction_year',
'architect', 'architect_firm', 'property_number', 'sliced',
'comment', 'reference', 'approx', 'owners', 'photos')
class PrincipalForSchoolSerializer(serializers.ModelSerializer):
"""
This class is needed for the School endpoint
"""
class Meta:
model = Principal
list_serializer_class = CensoredListSerializer
# fields must be declared here to get both id and url
fields = ('url', 'id', 'surname', 'first_name',)
class EmployershipForSchoolSerializer(serializers.ModelSerializer):
principal = PrincipalForSchoolSerializer()
class Meta:
model = Employership
list_serializer_class = CensoredListSerializer
exclude = ('nimen_id',)
class SchoolBuildingForSchoolSerializer(serializers.ModelSerializer):
"""
This class is needed for the School and Principal endpoints
"""
photos = SchoolBuildingPhotoSerializer(many=True)
building = BuildingForSchoolSerializer()
class Meta:
model = SchoolBuilding
depth = 5
# fields must be declared to get both id and url
fields = ('url', 'id', 'building', 'photos', 'approx_begin', 'approx_end',
'begin_day', 'begin_month', 'begin_year', 'end_day', 'end_month', 'end_year',
'ownership', 'reference',)
class SchoolforSchoolContinuumSerializer(CensoredHyperlinkedModelSerializer):
names = SchoolNameSerializer(many=True)
class Meta:
model = School
# fields must be declared here to explicitly include id along with url
fields = ('url', 'id', 'names')
class SchoolContinuumActiveSerializer(CensoredHyperlinkedModelSerializer):
target_school = SchoolforSchoolContinuumSerializer()
def to_representation(self, instance):
# translate joins and separations to English
representation = super().to_representation(instance)
representation['description'] = representation['description'].replace(
'yhdistyy', 'joins').replace('eroaa', 'separates from')
return representation
class Meta:
model = SchoolContinuum
fields = ('active_school', 'description', 'target_school', 'day', 'month', 'year',
'reference',)
class SchoolContinuumTargetSerializer(CensoredHyperlinkedModelSerializer):
active_school = SchoolforSchoolContinuumSerializer()
def to_representation(self, instance):
# translate joins and separations to English
representation = super().to_representation(instance)
representation['description'] = representation['description'].replace(
'yhdistyy', 'joins').replace('eroaa', 'separates from')
return representation
class Meta:
model = SchoolContinuum
fields = ('active_school', 'description', 'target_school', 'day', 'month', 'year',
'reference',)
class LifecycleEventSerializer(serializers.ModelSerializer):
description = serializers.CharField(source='type.description')
class Meta:
model = LifecycleEvent
fields = ('description', 'day', 'month', 'year', 'decisionmaker', 'additional_info')
class SchoolSerializer(CensoredHyperlinkedModelSerializer):
names = SchoolNameSerializer(many=True)
languages = SchoolLanguageSerializer(many=True)
types = SchoolTypeSerializer(many=True)
fields = SchoolFieldSerializer(many=True)
genders = SchoolGenderSerializer(many=True)
grade_counts = SchoolNumberOfGradesSerializer(many=True)
buildings = SchoolBuildingForSchoolSerializer(many=True)
owners = SchoolOwnershipSerializer(many=True)
founders = SchoolFounderSerializer(many=True)
principals = EmployershipForSchoolSerializer(many=True)
archives = ArchiveDataSerializer(many=True, required=False)
lifecycle_event = LifecycleEventSerializer(many=True, required=False)
continuum_active = SchoolContinuumActiveSerializer(many=True, required=False)
continuum_target = SchoolContinuumTargetSerializer(many=True, required=False)
class Meta:
model = School
# fields must be declared here to explicitly include id along with url
fields = ('url', 'id', 'names', 'languages', 'types', 'fields', 'genders',
'grade_counts', 'buildings', 'owners', 'founders', 'principals',
'special_features', 'wartime_school', 'nicknames', 'checked',
'archives', 'lifecycle_event', 'continuum_active', 'continuum_target')
class SchoolBuildingSerializer(CensoredHyperlinkedModelSerializer):
photos = SchoolBuildingPhotoSerializer(many=True)
school = SchoolSerializer()
building = BuildingForSchoolSerializer()
class Meta:
model = SchoolBuilding
depth = 5
# fields must be declared to get both id and url
fields = ('url', 'id', 'building', 'photos', 'school', 'approx_begin', 'approx_end',
'begin_day', 'begin_month', 'begin_year', 'end_day', 'end_month', 'end_year',
'ownership', 'reference',)
class EmployershipForPrincipalSerializer(serializers.ModelSerializer):
school = SchoolSerializer()
class Meta:
model = Employership
list_serializer_class = CensoredListSerializer
exclude = ('nimen_id',)
class PrincipalSerializer(serializers.ModelSerializer):
employers = EmployershipForPrincipalSerializer(many=True)
class Meta:
model = Principal
# depth required to get all related data
depth = 5
# fields must be declared to get both id and url
fields = ('url', 'id', 'surname', 'first_name', 'employers')
class EmployershipSerializer(EmployershipForSchoolSerializer):
school = SchoolSerializer()
class Meta:
model = Employership
exclude = ('nimen_id',)
class SchoolBuildingForBuildingSerializer(serializers.ModelSerializer):
photos = SchoolBuildingPhotoSerializer(many=True)
school = SchoolSerializer()
class Meta:
model = SchoolBuilding
depth = 5
# fields must be declared to get both id and url
fields = ('url', 'id', 'photos', 'school', 'approx_begin', 'approx_end',
'begin_day', 'begin_month', 'begin_year', 'end_day', 'end_month', 'end_year',
'ownership', 'reference',)
class BuildingSerializer(serializers.ModelSerializer):
neighborhood = serializers.CharField(source='neighborhood.name')
addresses = AddressSerializer(many=True)
schools = SchoolBuildingForBuildingSerializer(many=True)
class Meta:
model = Building
exclude = ('photo',)
class InclusiveFilter(django_filters.Filter):
"""
Filter for including entries where the field is null
"""
def filter(self, qs, value):
originalqs = super().filter(qs, value)
self.lookup_expr = 'isnull'
nullqs = super().filter(qs, value)
return nullqs | originalqs
class InclusiveNumberFilter(InclusiveFilter):
field_class = forms.DecimalField
class NameOrIdFilter(django_filters.Filter):
"""
Filter that switches search target between name and "id", depending on input
"""
table, underscore, column = "", "", ""
def filter(self, qs, value):
if str(value).isdigit():
self.field_class = forms.DecimalField
if not self.column:
# store table and column name
self.table, self.underscore, self.column = self.name.rpartition('__')
# overwrite column name with column id
self.name = self.table + '__id'
else:
self.field_class = forms.CharField
if self.column:
# overwrite column id with column name
self.name = self.table + '__' + self.column
return super().filter(qs, value)
class GenderFilter(django_filters.CharFilter):
"""
Filter that maps letters m, f and c to hard-coded genders
"""
GENDER_MAP = {
'm': 'poikakoulu',
'f': 'tyttökoulu',
'c': 'tyttö- ja poikakoulu'
}
def filter(self, qs, value):
if value in ([], (), {}, None, ''):
return qs
val = str(value).lower()
if val not in self.GENDER_MAP and val not in self.GENDER_MAP.values():
raise ParseError("Gender must be 'm', 'f' or 'c' (for coed)")
value = self.GENDER_MAP.get(val, val)
return super().filter(qs, value)
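# Illustrative query parameters (hypothetical request): "?gender=f" is mapped to
# 'tyttökoulu' before filtering, while an unrecognised value raises ParseError.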
class SchoolFilter(django_filters.FilterSet):
# the end year can be null, so we cannot use a default filter
from_year = InclusiveNumberFilter(name="names__end_year", lookup_expr='gte')
until_year = django_filters.NumberFilter(name="names__begin_year", lookup_expr='lte')
type = NameOrIdFilter(name="types__type__name", lookup_expr='iexact')
field = NameOrIdFilter(name="fields__field__description", lookup_expr='iexact')
language = NameOrIdFilter(name="languages__language__name", lookup_expr='iexact')
gender = GenderFilter(name="genders__gender", lookup_expr='iexact')
class Meta:
model = School
fields = ['type',
'field',
'language',
'gender',
'from_year',
'until_year']
class SchoolViewSet(viewsets.ReadOnlyModelViewSet):
queryset = School.objects.all()
serializer_class = SchoolSerializer
filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend)
filter_class = SchoolFilter
search_fields = ('names__types__value',)
class NameFilter(django_filters.CharFilter):
"""
Filter that checks fields 'first_name' and 'surname'
"""
table, underscore, column = "", "", ""
def filter(self, qs, value):
self.table, self.underscore, self.column = self.name.rpartition('__')
if self.table:
self.name = self.table + '__' + 'first_name'
else:
self.name = 'first_name'
first_name_qs = super().filter(qs, value)
if self.table:
self.name = self.table + '__' + 'surname'
else:
self.name = 'surname'
surname_qs = super().filter(qs, value)
return first_name_qs | surname_qs
class PrincipalFilter(django_filters.FilterSet):
# the end year can be null, so we cannot use a default filter
from_year = InclusiveNumberFilter(name="employers__end_year", lookup_expr='gte')
until_year = django_filters.NumberFilter(name="employers__begin_year", lookup_expr='lte')
search = NameFilter(name="surname", lookup_expr='icontains')
school_type = NameOrIdFilter(name="employers__school__types__type__name", lookup_expr='iexact')
school_field = NameOrIdFilter(name="employers__school__fields__field__description", lookup_expr='iexact')
school_language = NameOrIdFilter(name="employers__school__languages__language__name", lookup_expr='iexact')
school_gender = GenderFilter(name="employers__school__genders__gender", lookup_expr='iexact')
class Meta:
model = Principal
fields = ['search',
'from_year',
'until_year',
'school_type',
'school_field',
'school_language',
'school_gender']
class EmployershipFilter(django_filters.FilterSet):
# the end year can be null, so we cannot use a default filter
from_year = InclusiveNumberFilter(name="end_year", lookup_expr='gte')
until_year = django_filters.NumberFilter(name="begin_year", lookup_expr='lte')
search = NameFilter(name="principal__surname", lookup_expr='icontains')
school_type = NameOrIdFilter(name="school__types__type__name", lookup_expr='iexact')
school_field = NameOrIdFilter(name="school__fields__field__description", lookup_expr='iexact')
school_language = NameOrIdFilter(name="school__languages__language__name", lookup_expr='iexact')
school_gender = GenderFilter(name="school__genders__gender", lookup_expr='iexact')
class Meta:
model = Employership
fields = ['search',
'from_year',
'until_year',
'school_type',
'school_field',
'school_language',
'school_gender']
class PrincipalViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Principal.objects.filter(employers__end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY).distinct()
serializer_class = PrincipalSerializer
filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend)
filter_class = PrincipalFilter
class EmployershipViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Employership.objects.filter(end_year__lt=datetime.datetime.now().year-YEARS_OF_PRIVACY)
serializer_class = EmployershipSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = EmployershipFilter
class AddressFilter(django_filters.CharFilter):
"""
Filter that checks fields 'street_name_fi' and 'street_name_sv'
"""
def filter(self, qs, value):
self.name = 'building__buildingaddress__address__street_name_fi'
street_name_fi_qs = super().filter(qs, value)
self.name = 'building__buildingaddress__address__street_name_sv'
street_name_sv_qs = super().filter(qs, value)
return street_name_fi_qs | street_name_sv_qs
class SchoolBuildingFilter(django_filters.FilterSet):
# the end year can be null, so we cannot use a default filter
from_year = InclusiveNumberFilter(name="end_year", lookup_expr='gte')
until_year = django_filters.NumberFilter(name="begin_year", lookup_expr='lte')
search = AddressFilter(name="building__buildingaddress__address__street_name_fi", lookup_expr='icontains')
school_type = NameOrIdFilter(name="school__types__type__name", lookup_expr='iexact')
school_field = NameOrIdFilter(name="school__fields__field__description", lookup_expr='iexact')
school_language = NameOrIdFilter(name="school__languages__language__name", lookup_expr='iexact')
school_gender = GenderFilter(name="school__genders__gender", lookup_expr='iexact')
class Meta:
model = SchoolBuilding
fields = ['search',
'from_year',
'until_year',
'school_type',
'school_field',
'school_language',
'school_gender']
class BuildingFilter(django_filters.FilterSet):
# the end year can be null, so we cannot use a default filter
from_year = InclusiveNumberFilter(name="schools__end_year", lookup_expr='gte')
until_year = django_filters.NumberFilter(name="schools__begin_year", lookup_expr='lte')
search = AddressFilter(name="buildingaddress__address__street_name_fi", lookup_expr='icontains')
school_type = NameOrIdFilter(name="schools__school__types__type__name", lookup_expr='iexact')
school_field = NameOrIdFilter(name="schools__school__fields__field__description", lookup_expr='iexact')
school_language = NameOrIdFilter(name="schools__school__languages__language__name", lookup_expr='iexact')
school_gender = GenderFilter(name="schools__school__genders__gender", lookup_expr='iexact')
class Meta:
model = Building
fields = ['search',
'from_year',
'until_year',
'school_type',
'school_field',
'school_language',
'school_gender']
class SchoolBuildingViewSet(viewsets.ReadOnlyModelViewSet):
queryset = SchoolBuilding.objects.all()
serializer_class = SchoolBuildingSerializer
filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend)
filter_class = SchoolBuildingFilter
class BuildingViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Building.objects.all()
serializer_class = BuildingSerializer
filter_backends = (filters.SearchFilter, filters.DjangoFilterBackend)
filter_class = BuildingFilter
router = routers.DefaultRouter()
router.register(r'school', SchoolViewSet)
router.register(r'principal', PrincipalViewSet)
router.register(r'employership', EmployershipViewSet)
router.register(r'school_field', SchoolFieldNameViewSet)
router.register(r'school_type', SchoolTypeNameViewSet)
router.register(r'language', LanguageViewSet)
router.register(r'building', BuildingViewSet)
router.register(r'school_building', SchoolBuildingViewSet)
| City-of-Helsinki/kore | schools/api.py | Python | agpl-3.0 | 24,286 |
"""
Test utilities.
Partially based on the code from http://code.activestate.com/recipes/52215/
Author(s): Elric Milon
"""
import logging
import os
import sys
from tribler_core.utilities.network_utils import get_random_port
__all__ = ["process_unhandled_exceptions"]
class UnhandledExceptionCatcher(object):
"""
Logs the usual tb information, followed by a listing of all the
local variables in each frame and mark the test run as failed.
"""
def __init__(self):
self._logger = logging.getLogger(self.__class__.__name__)
self._lines = []
self.last_exc = None
self.exc_counter = 0
sys.excepthook = self.catch_exception
def _register_exception_line(self, line, *format_args):
line = line % format_args
self._lines.append(line)
self._logger.critical(line)
def catch_exception(self, type, value, tb):
"""
Catch unhandled exception, log it and store it to be printed at teardown time too.
"""
self.exc_counter += 1
def repr_(value):
try:
return repr(value)
except:
return "<Error while REPRing value>"
self.last_exc = repr_(value)
self._register_exception_line("Unhandled exception raised while running the test: %s %s", type, self.last_exc)
stack = []
while tb:
stack.append(tb.tb_frame)
tb = tb.tb_next
self._register_exception_line("Locals by frame, innermost last:")
for frame in stack:
self._register_exception_line("%s:%s %s:", frame.f_code.co_filename,
frame.f_lineno, frame.f_code.co_name)
for key, value in frame.f_locals.items():
value = repr_(value)
if len(value) > 500:
value = value[:500] + "..."
self._register_exception_line("| %12s = %s", key, value)
def check_exceptions(self):
"""
Log all unhandled exceptions, clear logged exceptions and raise to fail the currently running test.
"""
if self.exc_counter:
lines = self._lines
self._lines = []
exc_counter = self.exc_counter
self.exc_counter = 0
last_exc = self.last_exc
self.last_exc = 0
self._logger.critical("The following unhandled exceptions where raised during this test's execution:")
for line in lines:
self._logger.critical(line)
raise Exception("Test raised %d unhandled exceptions, last one was: %s" % (exc_counter, last_exc))
def prepare_xml_rss(target_path, filename):
"""
Function to prepare test_rss.xml file, replace the port with a random one
"""
files_path = target_path / 'http_torrent_files'
os.mkdir(files_path)
port = get_random_port()
from tribler_core.tests.tools.common import TESTS_DATA_DIR
with open(TESTS_DATA_DIR / filename, 'r') as source_xml,\
open(target_path / filename, 'w') as destination_xml:
for line in source_xml:
destination_xml.write(line.replace('RANDOMPORT', str(port)))
return files_path, port
_catcher = UnhandledExceptionCatcher()
process_unhandled_exceptions = _catcher.check_exceptions
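# Typical use (sketch): a test case can call process_unhandled_exceptions() in its
# tearDown() so that exceptions raised in other threads mark the test as failed.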
| hbiyik/tribler | src/tribler-core/tribler_core/tests/tools/util.py | Python | lgpl-3.0 | 3,343 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 11.3 from Kane 1985."""
from __future__ import division
from sympy import Matrix, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dynamicsymbols
R_SQ, R_QE, R_EM, R_Ee, R_Mm = symbols('R_SQ R_QE R_EM R_Ee R_Mm', positive=True)
omega_E, omega_M, omega_e, omega_m = symbols('ω_E ω_M ω_e ω_m', positive=True)
symbol_values = {R_SQ: 4.5e5, R_QE: 1.5e11, R_EM: 4.0e8,
R_Ee: 7.0e6, R_Mm: 2.0e6,
omega_E: 2e-7, omega_M: 24e-7,
omega_e: 12e-4, omega_m:10e-4}
# reference frames
S = ReferenceFrame('S')
Q = S.orientnew('Q', 'axis', [0, S.x])
E = Q.orientnew('E', 'axis', [0, S.x])
M = E.orientnew('M', 'axis', [0, S.x])
frames = [S, Q, E, M]
pS = Point('S')
pS.set_acc(S, 0)
pQ = Point('Q')
pQ.set_acc(Q, 0)
pE = Point('E')
pE.set_acc(E, 0)
pM = Point('M')
pM.set_acc(M, 0)
pe = Point('e')
pm = Point('m')
points = [pS, pQ, pE, pM, pe, pm]
# v = ω*R, a = ω**2 * R
pQ.set_acc(S, omega_E**2 * R_SQ * S.x)
pE.set_acc(Q, omega_E**2 * R_QE * S.x)
pM.set_acc(E, omega_M**2 * R_EM * S.x)
pe.set_acc(E, omega_e**2 * R_Ee * S.x)
pm.set_acc(M, omega_m**2 * R_Mm * S.x)
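# Quick magnitude check using the symbol values above (illustrative): the Earth's
# orbital term is omega_E**2 * R_QE = (2e-7)**2 * 1.5e11 = 6e-3 m/s**2.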
# v_p_A = v_p_B + cross(ω_B_A, r)
# = v_p_B, since angular vel is zero
# a_p_A = a_p_B + cross(ω_B_A, v_p_A)
# = a_p_B
# and a_p_A = a_p_Bbar + a_p_B + 2*cross(ω_B_A, v_p_B)
# = a_p_Bbar + a_p_B
pm.set_acc(E, pm.acc(M) + pM.acc(E))
pm.set_acc(Q, pm.acc(E) + pE.acc(Q))
pm.set_acc(S, pm.acc(Q) + pQ.acc(S))
pe.set_acc(M, pe.acc(E) + pM.acc(E))
pe.set_acc(Q, pe.acc(E) + pE.acc(Q))
pe.set_acc(S, pe.acc(Q) + pQ.acc(S))
pM.set_acc(Q, pM.acc(E) + pE.acc(Q))
pM.set_acc(S, pM.acc(Q) + pQ.acc(S))
pE.set_acc(M, pE.acc(E) + pM.acc(E))
pE.set_acc(S, pE.acc(Q) + pQ.acc(S))
pQ.set_acc(E, pQ.acc(Q) + pE.acc(Q))
pQ.set_acc(M, pQ.acc(E) + pM.acc(E))
pS.set_acc(Q, pS.acc(S) + pQ.acc(S))
pS.set_acc(E, pS.acc(Q) + pE.acc(Q))
pS.set_acc(M, pS.acc(E) + pM.acc(E))
#print('acc in frame\t{0}\t{1}\t{2}\t{3}'.format(*frames))
#for p in points:
# print('point {0}:\t{1:0.3g}\t{2:0.3g}\t{3:0.3g}\t{4:0.3g}'.format(
# #print('point {0}:\t{1}\t{2}\t{3}\t{4}'.format(
# p, *map(lambda x: float(p.acc(x).magnitude().subs(symbol_values)),
# frames)))
idx_Q = frames.index(Q)
print('acc in frame ratio\t{0}\t{1}\t{2}'.format(S, E, M))
acc_ratios = Matrix.zeros(4, 3)
for i, p in enumerate(points[2:]):
    acc_values = list(map(lambda x: p.acc(x).magnitude().subs(symbol_values),
                          frames))
a_p_Q = acc_values[idx_Q]
acc_values = [float(x / a_p_Q)
for x in acc_values[:idx_Q] + acc_values[idx_Q + 1:]]
acc_ratios[i, :] = Matrix(acc_values).T
print('object {0}:\t\t{1:0.3g}\t{2:0.3g}\t{3:0.3g}'.format(
p, *acc_values))
print('Approximately Newtonian reference frames have a near 1 ratio.')
min_ratio = 0.9 # minimum value if frame is approximately Newtonian.
assert acc_ratios[0, 0] >= min_ratio
assert acc_ratios[0, 1] < min_ratio
assert acc_ratios[0, 2] < min_ratio
assert acc_ratios[1, 0] >= min_ratio
assert acc_ratios[1, 1] < min_ratio
assert acc_ratios[1, 2] < min_ratio
assert acc_ratios[2, 0] >= min_ratio
assert acc_ratios[2, 1] >= min_ratio
assert acc_ratios[2, 2] >= min_ratio
assert acc_ratios[3, 0] >= min_ratio
assert acc_ratios[3, 1] >= min_ratio
assert acc_ratios[3, 2] >= min_ratio
| nouiz/pydy | examples/Kane1985/Chapter6/Ex11.3.py | Python | bsd-3-clause | 3,430 |
from __future__ import unicode_literals
import unittest
from mopidy_settings import Extension, frontend as frontend_lib
class ExtensionTest(unittest.TestCase):
def test_get_default_config(self):
ext = Extension()
config = ext.get_default_config()
self.assertIn('[nadsettings]', config)
self.assertIn('enabled = true', config)
def test_get_config_schema(self):
ext = Extension()
schema = ext.get_config_schema()
# TODO Test the content of your config schema
#self.assertIn('username', schema)
#self.assertIn('password', schema)
# TODO Write more tests
| dlogik/Mopidy-NadSettings | tests/test_extension.py | Python | apache-2.0 | 646 |
'''Please note that this is a Function problem, i.e.
you need to write your solution in the form of Function(s) only.
Driver Code to call/invoke your function would be added by GfG's Online Judge.'''
# Your task is to complete this function
# The function prints V space separated integers where
# the ith integer denote the shortest distance of ith vertex
# from source vertex
import numpy as np
from collections import deque
def dijkstra(graph, v, s):
    # Standard O(V^2) Dijkstra on an adjacency matrix where 0 means "no edge".
    # print(np.array(graph))
    dist = [float('inf')] * v
    visited = [False] * v
    dist[s] = 0
    for _ in range(v):
        # pick the unvisited vertex with the smallest tentative distance
        curr, min_dist = -1, float('inf')
        for i in range(v):
            if not visited[i] and dist[i] < min_dist:
                curr, min_dist = i, dist[i]
        if curr == -1:
            # remaining vertices are unreachable from the source
            break
        visited[curr] = True
        # relax every edge leaving the selected vertex
        for i in range(v):
            if graph[curr][i] != 0:
                dist[i] = min(dist[i], dist[curr] + graph[curr][i])
    print(' '.join(map(str, dist)), end='')
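# A minimal local driver for illustration only (GfG's judge normally supplies
# its own); the 4-vertex adjacency matrix below is made up for this sketch.
if __name__ == '__main__':
    example_graph = [[0, 4, 0, 0],
                     [4, 0, 8, 0],
                     [0, 8, 0, 7],
                     [0, 0, 7, 0]]
    dijkstra(example_graph, 4, 0)  # prints: 0 4 12 19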
| isendel/algorithms | algorithms/graph/dijkstra.py | Python | apache-2.0 | 1,057 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Jul 16, 2012
"""
__author__ = "Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
import unittest
import os
import json
import numpy as np
import warnings
from pymatgen.io.vasp.outputs import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar, Dynmat, BSVasprun, UnconvergedVASPWarning
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
def test_properties(self):
filepath = os.path.join(test_dir, 'vasprun.xml.nonlm')
vasprun = Vasprun(filepath, parse_potcar_file=False)
orbs = list(vasprun.complete_dos.pdos[vasprun.final_structure[
0]].keys())
self.assertIn("S", orbs)
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=False)
#Test NELM parsing.
self.assertEqual(vasprun.parameters["NELM"], 60)
#test pdos parsing
pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
self.assertAlmostEqual(pdos0[Orbital.s][1][16], 0.0026)
self.assertAlmostEqual(pdos0[Orbital.pz][-1][16], 0.0012)
self.assertEqual(pdos0[Orbital.s][1].shape, (301, ))
filepath2 = os.path.join(test_dir, 'lifepo4.xml')
vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True,
parse_potcar_file=False)
totalscsteps = sum([len(i['electronic_steps'])
for i in vasprun.ionic_steps])
self.assertEqual(29, len(vasprun.ionic_steps))
self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
self.assertEqual(vasprun.lattice,
vasprun.lattice_rec.reciprocal_lattice)
for i, step in enumerate(vasprun.ionic_steps):
self.assertEqual(vasprun.structures[i], step["structure"])
self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
"structure"] for i in range(len(vasprun.ionic_steps))]))
self.assertEqual(308, totalscsteps,
"Incorrect number of energies read from vasprun.xml")
self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
vasprun.atomic_symbols)
self.assertEqual(vasprun.final_structure.composition.reduced_formula,
"LiFe4(PO4)4")
self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
expectedans = (2.539, 4.0906, 1.5516, False)
(gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
self.assertAlmostEqual(gap, expectedans[0])
self.assertAlmostEqual(cbm, expectedans[1])
self.assertAlmostEqual(vbm, expectedans[2])
self.assertEqual(direct, expectedans[3])
self.assertFalse(vasprun.is_hubbard)
self.assertEqual(vasprun.potcar_symbols,
['PAW_PBE Li 17Jan2003', 'PAW_PBE Fe 06Sep2000',
'PAW_PBE Fe 06Sep2000', 'PAW_PBE P 17Jan2003',
'PAW_PBE O 08Apr2002'])
self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints,
"Actual kpoints cannot be read")
self.assertIsNotNone(vasprun.actual_kpoints_weights,
"Actual kpoints weights cannot be read")
for atomdoses in vasprun.pdos:
for orbitaldos in atomdoses:
self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
#test skipping ionic steps.
vasprun_skip = Vasprun(filepath, 3, parse_potcar_file=False)
self.assertEqual(vasprun_skip.nionic_steps, 29)
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
self.assertEqual(len(vasprun_skip.ionic_steps),
len(vasprun_skip.structures))
self.assertEqual(len(vasprun_skip.ionic_steps),
int(vasprun.nionic_steps / 3) + 1)
#Check that nionic_steps is preserved no matter what.
self.assertEqual(vasprun_skip.nionic_steps,
vasprun.nionic_steps)
self.assertNotAlmostEqual(vasprun_skip.final_energy,
vasprun.final_energy)
#Test with ionic_step_offset
vasprun_offset = Vasprun(filepath, 3, 6, parse_potcar_file=False)
self.assertEqual(len(vasprun_offset.ionic_steps),
int(len(vasprun.ionic_steps) / 3) - 1)
self.assertEqual(vasprun_offset.structures[0],
vasprun_skip.structures[2])
self.assertTrue(vasprun_ggau.is_hubbard)
self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[(Spin.up, 0,
0, 96,
Orbital.s)],
0.0032)
d = vasprun_ggau.as_dict()
self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
self.assertEqual(d["nelements"], 4)
filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vasprun_unconverged = Vasprun(filepath, parse_potcar_file=False)
# Verify some things
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category,
UnconvergedVASPWarning))
self.assertTrue(vasprun_unconverged.converged_ionic)
self.assertFalse(vasprun_unconverged.converged_electronic)
self.assertFalse(vasprun_unconverged.converged)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
vasprun_dfpt = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][0], 3.33402531)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[0][1], -0.00559998)
self.assertAlmostEqual(vasprun_dfpt.epsilon_static_wolfe[2][2], 3.31237357)
self.assertTrue(vasprun_dfpt.converged)
entry = vasprun_dfpt.get_computed_entry()
entry = MaterialsProjectCompatibility(check_potcar_hash=False).process_entry(entry)
self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
entry.energy)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.ionic')
vasprun_dfpt_ionic = Vasprun(filepath, parse_potcar_file=False)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][0], 515.73485838)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[0][1], -0.00263523)
self.assertAlmostEqual(vasprun_dfpt_ionic.epsilon_ionic[2][2], 19.02110169)
filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
vasprun_dfpt_unconv = Vasprun(filepath, parse_potcar_file=False)
self.assertFalse(vasprun_dfpt_unconv.converged_electronic)
self.assertTrue(vasprun_dfpt_unconv.converged_ionic)
self.assertFalse(vasprun_dfpt_unconv.converged)
vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"),
parse_potcar_file=False)
self.assertEqual(vasprun_uniform.kpoints.style, "Reciprocal")
vasprun_no_pdos = Vasprun(os.path.join(test_dir, "Li_no_projected.xml"),
parse_potcar_file=False)
self.assertIsNotNone(vasprun_no_pdos.complete_dos)
self.assertFalse(vasprun_no_pdos.dos_has_errors)
vasprun_diel = Vasprun(os.path.join(test_dir, "vasprun.xml.dielectric"),
parse_potcar_file=False)
self.assertAlmostEqual(0.4294,vasprun_diel.dielectric[0][10])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][0])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][1])
self.assertAlmostEqual(19.941,vasprun_diel.dielectric[1][51][2])
self.assertAlmostEqual(0.0,vasprun_diel.dielectric[1][51][3])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][0])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][1])
self.assertAlmostEqual(34.186,vasprun_diel.dielectric[2][85][2])
self.assertAlmostEqual(0.0,vasprun_diel.dielectric[2][85][3])
def test_Xe(self):
vr = Vasprun(os.path.join(test_dir, 'vasprun.xml.xe'), parse_potcar_file=False)
        self.assertEqual(vr.atomic_symbols, ['Xe'])
def test_invalid_element(self):
self.assertRaises(KeyError, Vasprun, os.path.join(test_dir, 'vasprun.xml.wrong_sp'))
def test_as_dict(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath,
parse_potcar_file=False)
#Test that as_dict() is json-serializable
self.assertIsNotNone(json.dumps(vasprun.as_dict()))
self.assertEqual(
vasprun.as_dict()["input"]["potcar_type"],
['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])
def test_get_band_structure(self):
filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
vasprun = Vasprun(filepath, parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=
os.path.join(test_dir,
'KPOINTS_Si_bands'))
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64],
"wrong vbm kpoint index")
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
def test_sc_step_overflow(self):
filepath = os.path.join(test_dir, 'vasprun.xml.sc_overflow')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath)
self.assertEqual(len(w), 3)
estep = vasprun.ionic_steps[0]['electronic_steps'][29]
self.assertTrue(np.isnan(estep['e_wo_entrp']))
def test_update_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
potcar_path = os.path.join(test_dir, 'POTCAR.LiFePO4.gz')
potcar_path2 = os.path.join(test_dir, 'POTCAR2.LiFePO4.gz')
vasprun = Vasprun(filepath, parse_potcar_file=False)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
vasprun.update_potcar_spec(potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
vasprun2 = Vasprun(filepath, parse_potcar_file=False)
self.assertRaises(ValueError, vasprun2.update_potcar_spec, potcar_path2)
vasprun = Vasprun(filepath, parse_potcar_file=potcar_path)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
self.assertRaises(ValueError, Vasprun, filepath, parse_potcar_file=potcar_path2)
def test_search_for_potcar(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
vasprun = Vasprun(filepath, parse_potcar_file=True)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003",
"hash": "65e83282d1707ec078c1012afbd05be8"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE Fe 06Sep2000",
"hash": "9530da8244e4dac17580869b4adab115"},
{"titel": "PAW_PBE P 17Jan2003",
"hash": "7dc3393307131ae67785a0cdacb61d5f"},
{"titel": "PAW_PBE O 08Apr2002",
"hash": "7a25bc5b9a5393f46600a4939d357982"}])
def test_potcar_not_found(self):
filepath = os.path.join(test_dir, 'vasprun.xml')
#Ensure no potcar is found and nothing is updated
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
vasprun = Vasprun(filepath, parse_potcar_file='.')
self.assertEqual(len(w), 1)
self.assertEqual(vasprun.potcar_spec, [{"titel": "PAW_PBE Li 17Jan2003", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE Fe 06Sep2000", "hash": None},
{"titel": "PAW_PBE P 17Jan2003", "hash": None},
{"titel": "PAW_PBE O 08Apr2002", "hash": None}])
class OutcarTest(unittest.TestCase):
def test_init(self):
for f in ['OUTCAR', 'OUTCAR.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
{'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
{'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162},
{'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
{'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162})
expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
{'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
{'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
self.assertAlmostEqual(outcar.magnetization, expected_mag, 5,
"Wrong magnetization read from Outcar")
self.assertAlmostEqual(outcar.charge, expected_chg, 5,
"Wrong charge read from Outcar")
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
'Total CPU time used (sec)': 545.142,
'Elapsed time (sec)': 546.709,
'Maximum memory used (kb)': 0.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 544.204,
'cores': '8'})
self.assertAlmostEqual(outcar.efermi, 2.0112)
self.assertAlmostEqual(outcar.nelect, 44.9999991)
self.assertAlmostEqual(outcar.total_mag, 0.9999998)
self.assertIsNotNone(outcar.as_dict())
filepath = os.path.join(test_dir, 'OUTCAR.stopped')
outcar = Outcar(filepath)
self.assertTrue(outcar.is_stopped)
for f in ['OUTCAR.lepsilon', 'OUTCAR.lepsilon.gz']:
filepath = os.path.join(test_dir, f)
outcar = Outcar(filepath)
outcar.read_lepsilon()
outcar.read_lepsilon_ionic()
self.assertAlmostEqual(outcar.dielectric_tensor[0][0], 3.716432)
self.assertAlmostEqual(outcar.dielectric_tensor[0][1], -0.20464)
self.assertAlmostEqual(outcar.dielectric_tensor[1][2], -0.20464)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][0], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[0][2], 0.001419)
self.assertAlmostEqual(outcar.dielectric_ionic_tensor[2][2], 0.001419)
self.assertAlmostEqual(outcar.piezo_tensor[0][0], 0.52799)
self.assertAlmostEqual(outcar.piezo_tensor[1][3], 0.35998)
self.assertAlmostEqual(outcar.piezo_tensor[2][5], 0.35997)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[0][0], 0.05868)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[1][3], 0.06241)
self.assertAlmostEqual(outcar.piezo_ionic_tensor[2][5], 0.06242)
self.assertAlmostEqual(outcar.born[0][1][2], -0.385)
self.assertAlmostEqual(outcar.born[1][2][0], 0.36465)
def test_elastic_tensor(self):
filepath = os.path.join(test_dir, "OUTCAR.total_tensor.Li2O.gz")
outcar = Outcar(filepath)
elastic_tensor = outcar.elastic_tensor
self.assertAlmostEqual(elastic_tensor[0][0], 1986.3391)
self.assertAlmostEqual(elastic_tensor[0][1], 187.8324)
self.assertAlmostEqual(elastic_tensor[3][3], 586.3034)
def test_core_state_eigen(self):
filepath = os.path.join(test_dir, "OUTCAR.CL")
cl = Outcar(filepath).read_core_state_eigen()
self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
def test_single_atom(self):
filepath = os.path.join(test_dir, "OUTCAR.Al")
outcar = Outcar(filepath)
expected_mag = ({u'p': 0.0, u's': 0.0, u'd': 0.0, u'tot': 0.0},)
expected_chg = ({u'p': 0.343, u's': 0.425, u'd': 0.0, u'tot': 0.768},)
self.assertAlmostEqual(outcar.magnetization, expected_mag)
self.assertAlmostEqual(outcar.charge, expected_chg)
self.assertFalse(outcar.is_stopped)
self.assertEqual(outcar.run_stats, {'System time (sec)': 0.592,
'Total CPU time used (sec)': 50.194,
'Elapsed time (sec)': 52.337,
'Maximum memory used (kb)': 62900.0,
'Average memory used (kb)': 0.0,
'User time (sec)': 49.602,
'cores': '32'})
self.assertAlmostEqual(outcar.efermi, 8.0942)
self.assertAlmostEqual(outcar.nelect, 3)
self.assertAlmostEqual(outcar.total_mag, 8.2e-06)
self.assertIsNotNone(outcar.as_dict())
class BSVasprunTest(unittest.TestCase):
def test_get_band_structure(self):
filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
vasprun = BSVasprun(filepath, parse_potcar_file=False)
bs = vasprun.get_band_structure(kpoints_filename=
os.path.join(test_dir,
'KPOINTS_Si_bands'))
cbm = bs.get_cbm()
vbm = bs.get_vbm()
self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
"wrong cbm bands")
self.assertEqual(vbm['kpoint_index'], [0, 63, 64],
"wrong vbm kpoint index")
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
Spin.down: [1, 2, 3]},
"wrong vbm bands")
self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong vbm label")
self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
class OszicarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'OSZICAR')
oszicar = Oszicar(filepath)
self.assertEqual(len(oszicar.electronic_steps),
len(oszicar.ionic_steps))
self.assertEqual(len(oszicar.all_energies), 60)
self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'LOCPOT')
locpot = Locpot.from_file(filepath)
self.assertAlmostEqual(-217.05226954,
sum(locpot.get_average_along_axis(0)))
self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'CHGCAR.nospin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
filepath = os.path.join(test_dir, 'CHGCAR.spin')
chg = Chgcar.from_file(filepath)
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022)
#test sum
chg += chg
self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
-0.0043896932237534022 * 2)
filepath = os.path.join(test_dir, 'CHGCAR.Fe3O4')
chg = Chgcar.from_file(filepath)
ans = [1.93313368, 3.91201473, 4.11858277, 4.1240093, 4.10634989,
3.38864822]
myans = chg.get_integrated_diff(0, 3, 6)
self.assertTrue(np.allclose(myans[:, 1], ans))
class ProcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'PROCAR.simple')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(1, 'd'), 0)
self.assertAlmostEqual(p.get_occupation(1, 's'), 0.3538125)
self.assertAlmostEqual(p.get_occupation(1, 'p'), 1.19540625)
self.assertRaises(ValueError, p.get_occupation, 1, 'm')
self.assertEqual(p.nb_bands, 10)
self.assertEqual(p.nb_kpoints, 10)
lat = Lattice.cubic(3.)
s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
[0.25, 0.25, 0.25],
[0.75, 0.75, 0.75]])
d = p.get_projection_on_elements(s)
self.assertAlmostEqual(d[1][2][2], {'Na': 0.042, 'K': 0.646, 'Li': 0.042})
filepath = os.path.join(test_dir, 'PROCAR')
p = Procar(filepath)
self.assertAlmostEqual(p.get_occupation(0, 'd'), 4.3698147704200059)
self.assertAlmostEqual(p.get_occupation(0, 'dxy'), 0.85796295426000124)
class XdatcarTest(unittest.TestCase):
def test_init(self):
filepath = os.path.join(test_dir, 'XDATCAR_4')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
filepath = os.path.join(test_dir, 'XDATCAR_5')
x = Xdatcar(filepath)
structures = x.structures
self.assertEqual(len(structures), 4)
for s in structures:
self.assertEqual(s.formula, "Li2 O1")
class DynmatTest(unittest.TestCase):
def test_init(self):
# nosetests pymatgen/io/vasp/tests/test_outputs.py:DynmatTest.test_init
filepath = os.path.join(test_dir, 'DYNMAT')
d = Dynmat(filepath)
self.assertEqual(d.nspecs, 2)
self.assertEqual(d.natoms, 6)
self.assertEqual(d.ndisps, 3)
self.assertTrue(np.allclose(d.masses, [63.546, 196.966]))
self.assertTrue(4 in d.data)
self.assertTrue(2 in d.data[4])
self.assertTrue(np.allclose(
d.data[4][2]['dispvec'], [0., 0.05, 0.]
))
self.assertTrue(np.allclose(
d.data[4][2]['dynmat'][3], [0.055046, -0.298080, 0.]
))
# TODO: test get_phonon_frequencies once cross-checked
if __name__ == "__main__":
unittest.main()
| migueldiascosta/pymatgen | pymatgen/io/vasp/tests/test_outputs.py | Python | mit | 27,721 |
import argparse
import numpy as np
import scipy.linalg
import subprocess
from shutil import copyfile
from functools import partial
import sys
import time
try:
import pickle as cpickle
except:
try:
import cpickle
except:
import _pickle as cpickle
# Add path to HydroGrid and import module
# sys.path.append('../../HydroGrid/src/')
# Find project functions
found_functions = False
path_to_append = ''
while found_functions is False:
try:
import multi_bodies_functions
from mobility import mobility as mb
from quaternion_integrator.quaternion import Quaternion
from quaternion_integrator.quaternion_integrator_multi_bodies import QuaternionIntegrator
from quaternion_integrator.quaternion_integrator_rollers import QuaternionIntegratorRollers
from body import body
from read_input import read_input
from read_input import read_vertex_file
from read_input import read_clones_file
from read_input import read_slip_file
import utils
try:
import libCallHydroGrid as cc
found_HydroGrid = True
except ImportError:
found_HydroGrid = False
found_functions = True
except ImportError:
path_to_append += '../'
print('searching functions in path ', path_to_append)
sys.path.append(path_to_append)
if len(path_to_append) > 21:
      print('\nProject functions not found. Edit path in multi_bodies.py')
sys.exit()
def calc_slip(bodies, Nblobs):
'''
Function to calculate the slip in all the blobs.
'''
slip = np.empty((Nblobs, 3))
offset = 0
for b in bodies:
slip_b = b.calc_slip()
slip[offset:offset+b.Nblobs] = slip_b
offset += b.Nblobs
return slip
def get_blobs_r_vectors(bodies, Nblobs):
'''
Return coordinates of all the blobs with shape (Nblobs, 3).
'''
r_vectors = np.empty((Nblobs, 3))
offset = 0
for b in bodies:
num_blobs = b.Nblobs
r_vectors[offset:(offset+num_blobs)] = b.get_r_vectors()
offset += num_blobs
return r_vectors
def set_mobility_blobs(implementation):
'''
Set the function to compute the dense mobility
at the blob level to the right implementation.
The implementation in C++ is much faster than
  the one in Python; to use it the user should compile
the file mobility/mobility_ext.cc.
These functions return an array with shape
(3*Nblobs, 3*Nblobs).
'''
# Implementations without wall
if implementation == 'python_no_wall':
return mb.rotne_prager_tensor
elif implementation == 'C++_no_wall':
return mb.boosted_infinite_fluid_mobility
# Implementations with wall
elif implementation == 'python':
return mb.single_wall_fluid_mobility
elif implementation == 'C++':
return mb.boosted_single_wall_fluid_mobility
def set_mobility_vector_prod(implementation):
'''
Set the function to compute the matrix-vector
product (M*F) with the mobility defined at the blob
level to the right implementation.
The implementation in pycuda is much faster than the
  one in C++, which is much faster than the one in Python.
  To use the pycuda implementation it is necessary to have
  pycuda installed and a GPU with CUDA capabilities. To
use the C++ implementation the user has to compile
the file mobility/mobility_ext.cc.
'''
# Implementations without wall
if implementation == 'python_no_wall':
return mb.no_wall_fluid_mobility_product
elif implementation == 'C++_no_wall':
return mb.boosted_no_wall_mobility_vector_product
elif implementation == 'pycuda_no_wall':
return mb.no_wall_mobility_trans_times_force_pycuda
elif implementation == 'numba_no_wall':
return mb.no_wall_mobility_trans_times_force_numba
# Implementations with wall
elif implementation == 'python':
return mb.single_wall_fluid_mobility_product
elif implementation == 'C++':
return mb.boosted_mobility_vector_product
elif implementation == 'pycuda':
return mb.single_wall_mobility_trans_times_force_pycuda
elif implementation == 'numba':
return mb.single_wall_mobility_trans_times_force_numba
def calc_K_matrix(bodies, Nblobs):
'''
Calculate the geometric block-diagonal matrix K.
Shape (3*Nblobs, 6*Nbodies).
'''
K = np.zeros((3*Nblobs, 6*len(bodies)))
offset = 0
for k, b in enumerate(bodies):
K_body = b.calc_K_matrix()
K[3*offset:3*(offset+b.Nblobs), 6*k:6*k+6] = K_body
offset += b.Nblobs
return K
def calc_K_matrix_bodies(bodies, Nblobs):
'''
  Calculate the geometric matrix K for
  each body. Returns a list with one K matrix per body.
'''
K = []
for k, b in enumerate(bodies):
K_body = b.calc_K_matrix()
K.append(K_body)
return K
def K_matrix_vector_prod(bodies, vector, Nblobs, K_bodies = None):
'''
Compute the matrix vector product K*vector where
  K is the geometric matrix that transports the information from the
  level of description of the body to the level of description of the blobs.
'''
# Prepare variables
result = np.empty((Nblobs, 3))
v = np.reshape(vector, (len(bodies) * 6))
# Loop over bodies
offset = 0
for k, b in enumerate(bodies):
if K_bodies is None:
K = b.calc_K_matrix()
else:
K = K_bodies[k]
result[offset : offset+b.Nblobs] = np.reshape(np.dot(K, v[6*k : 6*(k+1)]), (b.Nblobs, 3))
offset += b.Nblobs
return result
def K_matrix_T_vector_prod(bodies, vector, Nblobs, K_bodies = None):
'''
Compute the matrix vector product K^T*vector where
  K is the geometric matrix that transports the information from the
  level of description of the body to the level of description of the blobs.
'''
# Prepare variables
result = np.empty((len(bodies), 6))
v = np.reshape(vector, (Nblobs * 3))
# Loop over bodies
offset = 0
for k, b in enumerate(bodies):
if K_bodies is None:
K = b.calc_K_matrix()
else:
K = K_bodies[k]
result[k : k+1] = np.dot(K.T, v[3*offset : 3*(offset+b.Nblobs)])
offset += b.Nblobs
result = np.reshape(result, (2*len(bodies), 3))
return result
def linear_operator_rigid(vector, bodies, r_vectors, eta, a, K_bodies = None, *args, **kwargs):
'''
Return the action of the linear operator of the rigid body on vector v.
The linear operator is
| M -K|
| -K^T 0|
'''
# Reserve memory for the solution and create some variables
L = kwargs.get('periodic_length')
Ncomp_blobs = r_vectors.size
Nblobs = r_vectors.size // 3
Ncomp_bodies = 6 * len(bodies)
res = np.empty((Ncomp_blobs + Ncomp_bodies))
v = np.reshape(vector, (vector.size//3, 3))
# Compute the "slip" part
res[0:Ncomp_blobs] = mobility_vector_prod(r_vectors, vector[0:Ncomp_blobs], eta, a, *args, **kwargs)
K_times_U = K_matrix_vector_prod(bodies, v[Nblobs : Nblobs+2*len(bodies)], Nblobs, K_bodies = K_bodies)
res[0:Ncomp_blobs] -= np.reshape(K_times_U, (3*Nblobs))
# Compute the "-force_torque" part
K_T_times_lambda = K_matrix_T_vector_prod(bodies, vector[0:Ncomp_blobs], Nblobs, K_bodies = K_bodies)
res[Ncomp_blobs : Ncomp_blobs+Ncomp_bodies] = -np.reshape(K_T_times_lambda, (Ncomp_bodies))
return res
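# Comment sketch (restating the docstring above in equation form): for the
# stacked unknowns x = (lambda, U) this operator returns
#   res_blobs  = M*lambda - K*U
#   res_bodies = -K^T*lambda
# which is the matrix-vector product a Krylov solver (e.g. GMRES) needs when
# solving the rigid multiblob mobility problem.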
@utils.static_var('mobility_bodies', [])
@utils.static_var('K_bodies', [])
@utils.static_var('M_factorization_blobs', [])
@utils.static_var('M_factorization_blobs_inv', [])
@utils.static_var('mobility_inv_blobs', [])
def build_block_diagonal_preconditioners_det_stoch(bodies, r_vectors, Nblobs, eta, a, *args, **kwargs):
'''
Build the deterministic and stochastic block diagonal preconditioners for rigid bodies.
It solves exactly the mobility problem for each body
  independently, i.e., no interaction between bodies is taken
into account.
If the mobility of a body at the blob
level is M=L^T * L with L the Cholesky factor we form the stochastic preconditioners
P = inv(L)
P_inv = L
and the deterministic preconditioner
N = (K.T * M^{-1} * K)^{-1}
and return the functions to compute matrix vector products
y = (P.T * M * P) * x
y = P_inv * x
y = N*F - N*K.T*M^{-1}*slip
'''
mobility_bodies = []
K_bodies = []
M_factorization_blobs = []
M_factorization_blobs_inv = []
mobility_inv_blobs = []
if(kwargs.get('step') % kwargs.get('update_PC') == 0) or len(build_block_diagonal_preconditioners_det_stoch.mobility_bodies) == 0:
# Loop over bodies
for b in bodies:
# 1. Compute blobs mobility
M = b.calc_mobility_blobs(eta, a)
      # 2. Compute Cholesky factorization, M = L^T * L
L, lower = scipy.linalg.cho_factor(M)
L = np.triu(L)
M_factorization_blobs.append(L.T)
# 3. Compute inverse of L
M_factorization_blobs_inv.append(scipy.linalg.solve_triangular(L, np.eye(b.Nblobs * 3), check_finite=False))
# 4. Compute inverse mobility blobs
mobility_inv_blobs.append(scipy.linalg.solve_triangular(L, scipy.linalg.solve_triangular(L, np.eye(b.Nblobs * 3), trans='T', check_finite=False), check_finite=False))
# 5. Compute geometric matrix K
K = b.calc_K_matrix()
K_bodies.append(K)
# 6. Compute body mobility
mobility_bodies.append(np.linalg.pinv(np.dot(K.T, scipy.linalg.cho_solve((L,lower), K, check_finite=False))))
# Save variables to use in next steps if PC is not updated
build_block_diagonal_preconditioners_det_stoch.mobility_bodies = mobility_bodies
build_block_diagonal_preconditioners_det_stoch.K_bodies = K_bodies
build_block_diagonal_preconditioners_det_stoch.M_factorization_blobs = M_factorization_blobs
build_block_diagonal_preconditioners_det_stoch.M_factorization_blobs_inv = M_factorization_blobs_inv
build_block_diagonal_preconditioners_det_stoch.mobility_inv_blobs = mobility_inv_blobs
else:
# Use old values
mobility_bodies = build_block_diagonal_preconditioners_det_stoch.mobility_bodies
K_bodies = build_block_diagonal_preconditioners_det_stoch.K_bodies
M_factorization_blobs = build_block_diagonal_preconditioners_det_stoch.M_factorization_blobs
M_factorization_blobs_inv = build_block_diagonal_preconditioners_det_stoch.M_factorization_blobs_inv
mobility_inv_blobs = build_block_diagonal_preconditioners_det_stoch.mobility_inv_blobs
def block_diagonal_preconditioner(vector, bodies = None, mobility_bodies = None, mobility_inv_blobs = None, K_bodies = None, Nblobs = None, *args, **kwargs):
'''
Apply the block diagonal preconditioner.
'''
result = np.empty(vector.shape)
offset = 0
for k, b in enumerate(bodies):
# 1. Solve M*Lambda_tilde = slip
slip = vector[3*offset : 3*(offset + b.Nblobs)]
Lambda_tilde = np.dot(mobility_inv_blobs[k], slip)
# 2. Compute rigid body velocity
F = vector[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)]
Y = np.dot(mobility_bodies[k], -F - np.dot(K_bodies[k].T, Lambda_tilde))
# 3. Solve M*Lambda = (slip + K*Y)
result[3*offset : 3*(offset + b.Nblobs)] = np.dot(mobility_inv_blobs[k], slip + np.dot(K_bodies[k], Y))
# 4. Set result
result[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)] = Y
offset += b.Nblobs
return result
block_diagonal_preconditioner_partial = partial(block_diagonal_preconditioner,
bodies = bodies,
mobility_bodies = mobility_bodies,
mobility_inv_blobs = mobility_inv_blobs,
K_bodies = K_bodies,
Nblobs = Nblobs)
# Define preconditioned mobility matrix product
def mobility_pc(w, bodies = None, P = None, r_vectors = None, eta = None, a = None, *args, **kwargs):
result = np.empty_like(w)
# Apply P
offset = 0
for k, b in enumerate(bodies):
result[3*offset : 3*(offset + b.Nblobs)] = np.dot(P[k], w[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
# Multiply by M
result_2 = mobility_vector_prod(r_vectors, result, eta, a, *args, **kwargs)
# Apply P.T
offset = 0
for k, b in enumerate(bodies):
result[3*offset : 3*(offset + b.Nblobs)] = np.dot(P[k].T, result_2[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
return result
mobility_pc_partial = partial(mobility_pc, bodies = bodies, P = M_factorization_blobs_inv, r_vectors = r_vectors, eta = eta, a = a, *args, **kwargs)
# Define inverse preconditioner P_inv
def P_inv_mult(w, bodies = None, P_inv = None):
offset = 0
for k, b in enumerate(bodies):
w[3*offset : 3*(offset + b.Nblobs)] = np.dot(P_inv[k], w[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
return w
P_inv_mult_partial = partial(P_inv_mult, bodies = bodies, P_inv = M_factorization_blobs)
# Return preconditioner functions
return block_diagonal_preconditioner_partial, mobility_pc_partial, P_inv_mult_partial
@utils.static_var('mobility_bodies', [])
@utils.static_var('K_bodies', [])
@utils.static_var('mobility_inv_blobs', [])
def build_block_diagonal_preconditioner(bodies, r_vectors, Nblobs, eta, a, *args, **kwargs):
'''
Build the block diagonal preconditioner for rigid bodies.
It solves exactly the mobility problem for each body
  independently, i.e., no interaction between bodies is taken
into account.
'''
mobility_inv_blobs = []
mobility_bodies = []
K_bodies = []
if(kwargs.get('step') % kwargs.get('update_PC') == 0) or len(build_block_diagonal_preconditioner.mobility_bodies) == 0:
# Loop over bodies
for b in bodies:
# 1. Compute blobs mobility and invert it
M = b.calc_mobility_blobs(eta, a)
      # 2. Compute Cholesky factorization, M = L^T * L
L, lower = scipy.linalg.cho_factor(M)
L = np.triu(L)
# 3. Compute inverse mobility blobs
mobility_inv_blobs.append(scipy.linalg.solve_triangular(L, scipy.linalg.solve_triangular(L, np.eye(b.Nblobs * 3), trans='T', check_finite=False), check_finite=False))
# 4. Compute geometric matrix K
K = b.calc_K_matrix()
K_bodies.append(K)
# 5. Compute body mobility
mobility_bodies.append(np.linalg.pinv(np.dot(K.T, scipy.linalg.cho_solve((L,lower), K, check_finite=False))))
# Save variables to use in next steps if PC is not updated
build_block_diagonal_preconditioner.mobility_bodies = mobility_bodies
build_block_diagonal_preconditioner.K_bodies = K_bodies
build_block_diagonal_preconditioner.mobility_inv_blobs = mobility_inv_blobs
else:
# Use old values
mobility_bodies = build_block_diagonal_preconditioner.mobility_bodies
K_bodies = build_block_diagonal_preconditioner.K_bodies
mobility_inv_blobs = build_block_diagonal_preconditioner.mobility_inv_blobs
def block_diagonal_preconditioner(vector, bodies = None, mobility_bodies = None, mobility_inv_blobs = None, K_bodies = None, Nblobs = None):
'''
Apply the block diagonal preconditioner.
'''
result = np.empty(vector.shape)
offset = 0
for k, b in enumerate(bodies):
# 1. Solve M*Lambda_tilde = slip
slip = vector[3*offset : 3*(offset + b.Nblobs)]
Lambda_tilde = np.dot(mobility_inv_blobs[k], slip)
# 2. Compute rigid body velocity
F = vector[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)]
Y = np.dot(mobility_bodies[k], -F - np.dot(K_bodies[k].T, Lambda_tilde))
# 3. Solve M*Lambda = (slip + K*Y)
Lambda = np.dot(mobility_inv_blobs[k], slip + np.dot(K_bodies[k], Y))
# 4. Set result
result[3*offset : 3*(offset + b.Nblobs)] = Lambda
result[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)] = Y
offset += b.Nblobs
return result
block_diagonal_preconditioner_partial = partial(block_diagonal_preconditioner,
bodies = bodies,
mobility_bodies = mobility_bodies,
mobility_inv_blobs = mobility_inv_blobs,
K_bodies = K_bodies,
Nblobs = Nblobs)
return block_diagonal_preconditioner_partial
def block_diagonal_preconditioner(vector, bodies, mobility_bodies, mobility_inv_blobs, Nblobs):
'''
Block diagonal preconditioner for rigid bodies.
It solves exactly the mobility problem for each body
  independently, i.e., no interaction between bodies is taken
into account.
'''
result = np.empty(vector.shape)
offset = 0
for k, b in enumerate(bodies):
# 1. Solve M*Lambda_tilde = slip
slip = vector[3*offset : 3*(offset + b.Nblobs)]
Lambda_tilde = np.dot(mobility_inv_blobs[k], slip)
# 2. Compute rigid body velocity
F = vector[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)]
Y = np.dot(mobility_bodies[k], -F - np.dot(b.calc_K_matrix().T, Lambda_tilde))
# 3. Solve M*Lambda = (slip + K*Y)
Lambda = np.dot(mobility_inv_blobs[k], slip + np.dot(b.calc_K_matrix(), Y))
# 4. Set result
result[3*offset : 3*(offset + b.Nblobs)] = Lambda
result[3*Nblobs + 6*k : 3*Nblobs + 6*(k+1)] = Y
offset += b.Nblobs
return result
def build_stochastic_block_diagonal_preconditioner(bodies, r_vectors, eta, a, *args, **kwargs):
'''
Build block diagonal preconditioner to generate the noise
for rigid bodies. If the mobility of a body at the blob
level is M=L^T * L with L the Cholesky factor we form the stochastic preconditioners
P = inv(L)
P_inv = L
and return the functions to compute matrix vector products
y = (P.T * M * P) * x
y = P_inv * x
'''
P = []
P_inv = []
for b in bodies:
# Compute blobs mobility for one body
M = b.calc_mobility_blobs(eta, a)
    # 2. Compute Cholesky factorization, M = L^T * L
L, lower = scipy.linalg.cho_factor(M)
L = np.triu(L)
P_inv.append(L.T)
# Form preconditioners version P
P.append(scipy.linalg.solve_triangular(L, np.eye(b.Nblobs * 3), check_finite=False))
# Define preconditioned mobility matrix product
def mobility_pc(w, bodies = None, P = None, r_vectors = None, eta = None, a = None, *args, **kwargs):
result = np.empty_like(w)
# Multiply by P.T
offset = 0
for k, b in enumerate(bodies):
result[3*offset : 3*(offset + b.Nblobs)] = np.dot(P[k], w[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
# Multiply by M
result_2 = mobility_vector_prod(r_vectors, result, eta, a, *args, **kwargs)
# Multiply by P
offset = 0
for k, b in enumerate(bodies):
result[3*offset : 3*(offset + b.Nblobs)] = np.dot(P[k].T, result_2[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
return result
mobility_pc_partial = partial(mobility_pc, bodies = bodies, P = P, r_vectors = r_vectors, eta = eta, a = a, *args, **kwargs)
# Define inverse preconditioner P_inv
def P_inv_mult(w, bodies = None, P_inv = None):
offset = 0
for k, b in enumerate(bodies):
w[3*offset : 3*(offset + b.Nblobs)] = np.dot(P_inv[k], w[3*offset : 3*(offset + b.Nblobs)])
offset += b.Nblobs
return w
P_inv_mult_partial = partial(P_inv_mult, bodies = bodies, P_inv = P_inv)
# Return preconditioner functions
return mobility_pc_partial, P_inv_mult_partial
if __name__ == '__main__':
# Get command line arguments
parser = argparse.ArgumentParser(description='Run a multi-body simulation and save trajectory.')
parser.add_argument('--input-file', dest='input_file', type=str, default='data.main', help='name of the input file')
parser.add_argument('--print-residual', action='store_true', help='print gmres and lanczos residuals')
args=parser.parse_args()
input_file = args.input_file
# Read input file
read = read_input.ReadInput(input_file)
# Set some variables for the simulation
n_steps = read.n_steps
n_save = read.n_save
n_relaxation = read.n_relaxation
dt = read.dt
eta = read.eta
g = read.g
a = read.blob_radius
scheme = read.scheme
output_name = read.output_name
structures = read.structures
structures_ID = read.structures_ID
mobility_vector_prod = set_mobility_vector_prod(read.mobility_vector_prod_implementation)
multi_bodies_functions.calc_blob_blob_forces = multi_bodies_functions.set_blob_blob_forces(read.blob_blob_force_implementation)
multi_bodies_functions.calc_body_body_forces_torques = multi_bodies_functions.set_body_body_forces_torques(read.body_body_force_torque_implementation)
# Copy input file to output
#subprocess.call(["cp", input_file, output_name + '.inputfile'])
copyfile(input_file, output_name + '.inputfile')
# Set random generator state
if read.random_state is not None:
with open(read.random_state, 'rb') as f:
np.random.set_state(cpickle.load(f))
elif read.seed is not None:
np.random.seed(int(read.seed))
# Save random generator state
with open(output_name + '.random_state', 'wb') as f:
cpickle.dump(np.random.get_state(), f)
# Create rigid bodies
bodies = []
body_types = []
body_names = []
for ID, structure in enumerate(structures):
print('Creating structures = ', structure[1])
# Read vertex and clones files
struct_ref_config = read_vertex_file.read_vertex_file(structure[0])
num_bodies_struct, struct_locations, struct_orientations = read_clones_file.read_clones_file(structure[1])
# Read slip file if it exists
slip = None
if(len(structure) > 2):
slip = read_slip_file.read_slip_file(structure[2])
body_types.append(num_bodies_struct)
body_names.append(structures_ID[ID])
# Create each body of type structure
for i in range(num_bodies_struct):
b = body.Body(struct_locations[i], struct_orientations[i], struct_ref_config, a)
b.mobility_blobs = set_mobility_blobs(read.mobility_blobs_implementation)
b.ID = structures_ID[ID]
# Calculate body length for the RFD
if i == 0:
b.calc_body_length()
else:
b.body_length = bodies[-1].body_length
multi_bodies_functions.set_slip_by_ID(b, slip)
# Append bodies to total bodies list
bodies.append(b)
bodies = np.array(bodies)
# Set some more variables
num_of_body_types = len(body_types)
num_bodies = bodies.size
Nblobs = sum([x.Nblobs for x in bodies])
# Save bodies information
with open(output_name + '.bodies_info', 'w') as f:
f.write('num_of_body_types ' + str(num_of_body_types) + '\n')
f.write('body_names ' + str(body_names) + '\n')
f.write('body_types ' + str(body_types) + '\n')
f.write('num_bodies ' + str(num_bodies) + '\n')
f.write('num_blobs ' + str(Nblobs) + '\n')
# Create integrator
if scheme.find('rollers') == -1:
integrator = QuaternionIntegrator(bodies, Nblobs, scheme, tolerance = read.solver_tolerance, domain = read.domain)
else:
integrator = QuaternionIntegratorRollers(bodies, Nblobs, scheme, tolerance = read.solver_tolerance, domain = read.domain,
mobility_vector_prod_implementation = read.mobility_vector_prod_implementation)
integrator.calc_one_blob_forces = partial(multi_bodies_functions.calc_one_blob_forces,
g = g,
repulsion_strength_wall = read.repulsion_strength_wall,
debye_length_wall = read.debye_length_wall)
integrator.calc_blob_blob_forces = partial(multi_bodies_functions.calc_blob_blob_forces,
g = g,
repulsion_strength_wall = read.repulsion_strength_wall,
debye_length_wall = read.debye_length_wall,
repulsion_strength = read.repulsion_strength,
debye_length = read.debye_length,
periodic_length = read.periodic_length)
integrator.omega_one_roller = read.omega_one_roller
integrator.free_kinematics = read.free_kinematics
integrator.hydro_interactions = read.hydro_interactions
integrator.calc_slip = calc_slip
integrator.get_blobs_r_vectors = get_blobs_r_vectors
integrator.mobility_blobs = set_mobility_blobs(read.mobility_blobs_implementation)
integrator.force_torque_calculator = partial(multi_bodies_functions.force_torque_calculator_sort_by_bodies,
g = g,
repulsion_strength_wall = read.repulsion_strength_wall,
debye_length_wall = read.debye_length_wall,
repulsion_strength = read.repulsion_strength,
debye_length = read.debye_length,
periodic_length = read.periodic_length)
integrator.calc_K_matrix_bodies = calc_K_matrix_bodies
integrator.calc_K_matrix = calc_K_matrix
integrator.linear_operator = linear_operator_rigid
integrator.preconditioner = block_diagonal_preconditioner
integrator.build_block_diagonal_preconditioner = build_block_diagonal_preconditioner
integrator.build_block_diagonal_preconditioners_det_stoch = build_block_diagonal_preconditioners_det_stoch
integrator.eta = eta
integrator.a = a
integrator.first_guess = np.zeros(Nblobs*3 + num_bodies*6)
integrator.kT = read.kT
integrator.mobility_vector_prod = mobility_vector_prod
integrator.K_matrix_T_vector_prod = K_matrix_T_vector_prod
integrator.K_matrix_vector_prod = K_matrix_vector_prod
integrator.build_stochastic_block_diagonal_preconditioner = build_stochastic_block_diagonal_preconditioner
integrator.preprocess = multi_bodies_functions.preprocess
integrator.postprocess = multi_bodies_functions.postprocess
integrator.periodic_length = read.periodic_length
integrator.update_PC = read.update_PC
integrator.print_residual = args.print_residual
integrator.rf_delta = read.rf_delta
# Initialize HydroGrid library:
if found_HydroGrid and read.call_HydroGrid:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
0,
dt * read.sample_HydroGrid,
Nblobs,
0,
get_blobs_r_vectors(bodies, Nblobs))
# Loop over time steps
start_time = time.time()
if read.save_clones == 'one_file':
output_files = []
for i, ID in enumerate(structures_ID):
name = output_name + '.' + ID + '.config'
output_files.append(open(name, 'w'))
for step in range(read.initial_step, n_steps):
# Save data if...
if (step % n_save) == 0 and step >= 0:
elapsed_time = time.time() - start_time
print('Integrator = ', scheme, ', step = ', step, ', invalid configurations', integrator.invalid_configuration_count, ', wallclock time = ', time.time() - start_time)
# For each type of structure save locations and orientations to one file
body_offset = 0
if read.save_clones == 'one_file_per_step':
for i, ID in enumerate(structures_ID):
name = output_name + '.' + ID + '.' + str(step).zfill(8) + '.clones'
with open(name, 'w') as f_ID:
f_ID.write(str(body_types[i]) + '\n')
for j in range(body_types[i]):
orientation = bodies[body_offset + j].orientation.entries
f_ID.write('%s %s %s %s %s %s %s\n' % (bodies[body_offset + j].location[0],
bodies[body_offset + j].location[1],
bodies[body_offset + j].location[2],
orientation[0],
orientation[1],
orientation[2],
orientation[3]))
body_offset += body_types[i]
elif read.save_clones == 'one_file':
for i, f_ID in enumerate(output_files):
f_ID.write(str(body_types[i]) + '\n')
for j in range(body_types[i]):
orientation = bodies[body_offset + j].orientation.entries
f_ID.write('%s %s %s %s %s %s %s\n' % (bodies[body_offset + j].location[0],
bodies[body_offset + j].location[1],
bodies[body_offset + j].location[2],
orientation[0],
orientation[1],
orientation[2],
orientation[3]))
body_offset += body_types[i]
else:
print('Error, save_clones =', read.save_clones, 'is not implemented.')
print('Use \"one_file_per_step\" or \"one_file\". \n')
break
# Save mobilities
if read.save_blobs_mobility == 'True' or read.save_body_mobility == 'True':
r_vectors_blobs = integrator.get_blobs_r_vectors(bodies, Nblobs)
mobility_blobs = integrator.mobility_blobs(r_vectors_blobs, read.eta, read.blob_radius)
if read.save_blobs_mobility == 'True':
name = output_name + '.blobs_mobility.' + str(step).zfill(8) + '.dat'
np.savetxt(name, mobility_blobs, delimiter=' ')
if read.save_body_mobility == 'True':
resistance_blobs = np.linalg.inv(mobility_blobs)
K = integrator.calc_K_matrix(bodies, Nblobs)
resistance_bodies = np.dot(K.T, np.dot(resistance_blobs, K))
mobility_bodies = np.linalg.pinv(np.dot(K.T, np.dot(resistance_blobs, K)))
name = output_name + '.body_mobility.' + str(step).zfill(8) + '.dat'
np.savetxt(name, mobility_bodies, delimiter=' ')
# Update HydroGrid
if (step % read.sample_HydroGrid) == 0 and found_HydroGrid and read.call_HydroGrid:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
step,
dt * read.sample_HydroGrid,
Nblobs,
1,
get_blobs_r_vectors(bodies, Nblobs))
# Save HydroGrid data
if read.save_HydroGrid > 0 and found_HydroGrid and read.call_HydroGrid:
if (step % read.save_HydroGrid) == 0:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
step,
dt * read.sample_HydroGrid,
Nblobs,
2,
get_blobs_r_vectors(bodies, Nblobs))
# Advance time step
integrator.advance_time_step(dt, step = step)
# Save final data if...
if ((step+1) % n_save) == 0 and step >= 0:
print('Integrator = ', scheme, ', step = ', step+1, ', invalid configurations', integrator.invalid_configuration_count, ', wallclock time = ', time.time() - start_time)
# For each type of structure save locations and orientations to one file
body_offset = 0
if read.save_clones == 'one_file_per_step':
for i, ID in enumerate(structures_ID):
name = output_name + '.' + ID + '.' + str(step+1).zfill(8) + '.clones'
with open(name, 'w') as f_ID:
f_ID.write(str(body_types[i]) + '\n')
for j in range(body_types[i]):
orientation = bodies[body_offset + j].orientation.entries
f_ID.write('%s %s %s %s %s %s %s\n' % (bodies[body_offset + j].location[0],
bodies[body_offset + j].location[1],
bodies[body_offset + j].location[2],
orientation[0],
orientation[1],
orientation[2],
orientation[3]))
body_offset += body_types[i]
elif read.save_clones == 'one_file':
for i, f_ID in enumerate(output_files):
f_ID.write(str(body_types[i]) + '\n')
for j in range(body_types[i]):
orientation = bodies[body_offset + j].orientation.entries
f_ID.write('%s %s %s %s %s %s %s\n' % (bodies[body_offset + j].location[0],
bodies[body_offset + j].location[1],
bodies[body_offset + j].location[2],
orientation[0],
orientation[1],
orientation[2],
orientation[3]))
body_offset += body_types[i]
else:
print('Error, save_clones =', read.save_clones, 'is not implemented.')
print('Use \"one_file_per_step\" or \"one_file\". \n')
# Save mobilities
if read.save_blobs_mobility == 'True' or read.save_body_mobility == 'True':
r_vectors_blobs = integrator.get_blobs_r_vectors(bodies, Nblobs)
mobility_blobs = integrator.mobility_blobs(r_vectors_blobs, read.eta, read.blob_radius)
if read.save_blobs_mobility == 'True':
name = output_name + '.blobs_mobility.' + str(step+1).zfill(8) + '.dat'
np.savetxt(name, mobility_blobs, delimiter=' ')
if read.save_body_mobility == 'True':
resistance_blobs = np.linalg.inv(mobility_blobs)
K = integrator.calc_K_matrix(bodies, Nblobs)
resistance_bodies = np.dot(K.T, np.dot(resistance_blobs, K))
mobility_bodies = np.linalg.pinv(np.dot(K.T, np.dot(resistance_blobs, K)))
name = output_name + '.body_mobility.' + str(step+1).zfill(8) + '.dat'
np.savetxt(name, mobility_bodies, delimiter=' ')
# Update HydroGrid data
if ((step+1) % read.sample_HydroGrid) == 0 and found_HydroGrid and read.call_HydroGrid:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
step+1,
dt * read.sample_HydroGrid,
Nblobs,
1,
get_blobs_r_vectors(bodies, Nblobs))
# Save HydroGrid data
if read.save_HydroGrid > 0 and found_HydroGrid and read.call_HydroGrid:
if ((step+1) % read.save_HydroGrid) == 0:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
step+1,
dt * read.sample_HydroGrid,
Nblobs,
2,
get_blobs_r_vectors(bodies, Nblobs))
# Free HydroGrid
if found_HydroGrid and read.call_HydroGrid:
cc.calculate_concentration(output_name,
read.periodic_length[0],
read.periodic_length[1],
int(read.green_particles[0]),
int(read.green_particles[1]),
int(read.cells[0]),
int(read.cells[1]),
step+1,
dt * read.sample_HydroGrid,
Nblobs,
3,
get_blobs_r_vectors(bodies, Nblobs))
# Save wallclock time
with open(output_name + '.time', 'w') as f:
f.write(str(time.time() - start_time) + '\n')
# Save number of invalid configurations and number of iterations in the
# deterministic solvers and the Lanczos algorithm
with open(output_name + '.info', 'w') as f:
f.write('invalid_configuration_count = ' + str(integrator.invalid_configuration_count) + '\n'
+ 'deterministic_iterations_count = ' + str(integrator.det_iterations_count) + '\n'
+ 'stochastic_iterations_count = ' + str(integrator.stoch_iterations_count) + '\n')
print('\n\n\n# End')
| stochasticHydroTools/RigidMultiblobsWall | multi_bodies/examples/Rollers_Small_Example/multi_bodies.py | Python | gpl-3.0 | 38,017 |
import os
class suppress_stdout_stderr(object):
"""
Via http://stackoverflow.com/questions/11130156/suppress-stdout-stderr-print-from-python-functions
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
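# Illustrative usage (not part of the original module): silence a noisy call,
# including output produced by compiled extensions writing directly to fd 1/2.
if __name__ == '__main__':
    with suppress_stdout_stderr():
        print('this line is swallowed by /dev/null')
    print('normal output is restored after the with-block')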
| skoczen/dewey | dewey/util.py | Python | mit | 1,259 |
# jsb.plugs.common/learn.py
#
#
""" learn information items .. facts .. factoids. """
## jsb imports
from jsb.lib.callbacks import callbacks
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.lazydict import LazyDict
from jsb.lib.persist import PlugPersist
## basic imports
import logging
## commands
def handle_learn(bot, event):
"""" set an information item. """
if not event.rest: event.missing("<item> is <description>") ; return
try: (what, description) = event.rest.split(" is ", 1)
except ValueError: event.missing("<item> is <description>") ; return
what = what.lower()
items = PlugPersist(event.channel)
if not items.data: items.data = LazyDict()
if not items.data.has_key(what): items.data[what] = []
if description not in items.data[what]: items.data[what].append(description)
items.save()
event.reply("%s item added to %s database" % (what, event.channel))
cmnds.add('learn', handle_learn, ['USER', 'GUEST'])
examples.add('learn', 'learn the bot a description of an item.', "learn dunk is botpapa")
def handle_forget(bot, event):
"""" set an information item. """
if not event.rest: event.missing("<item> and <match>") ; return
    try: (what, match) = event.rest.split(" and ", 1)
except ValueError: event.missing("<item> and <match>") ; return
what = what.lower()
items = PlugPersist(event.channel)
if not items.data: items.data = LazyDict()
if items.data.has_key(what):
for i in range(len(items.data[what])):
if match in items.data[what][i]:
del items.data[what][i]
items.save()
break
event.reply("item removed from %s database" % event.channel)
cmnds.add('forget', handle_forget, ['USER'])
examples.add('forget', 'forget a description of an item.', "forget dunk and botpapa")
def handle_whatis(bot, event):
items = PlugPersist(event.channel)
what = event.rest.lower().split('!')[0].strip()
if what in items.data and items.data[what]: event.reply("%s is " % event.rest, items.data[what], dot=", ")
else: event.reply("no information known about %s" % what)
cmnds.add('whatis', handle_whatis, ['USER', 'GUEST'])
examples.add("whatis", "whatis learned about a subject", "whatis jsb")
def handle_items(bot, event):
items = PlugPersist(event.channel).data.keys()
event.reply("i know %s items: " % len(items), items)
cmnds.add('items', handle_items, ['USER', 'GUEST'])
examples.add("items", "show what items the bot knows", "items")
def prelearn(bot, event):
if event.txt and event.txt[0] == "?" and not event.forwarded: return True
return False
def learncb(bot, event):
event.bind(bot)
items = PlugPersist(event.channel)
target = event.txt[1:].lower().split('!')[0].strip()
if target in items.data: event.reply("%s is " % target, items.data[target], dot=", ")
event.ready()
callbacks.add("PRIVMSG", learncb, prelearn)
callbacks.add("MESSAGE", learncb, prelearn)
callbacks.add("DISPATCH", learncb, prelearn)
callbacks.add("CONSOLE", learncb, prelearn)
callbacks.add("CMND", learncb, prelearn)
| melmothx/jsonbot | jsb/plugs/common/learn.py | Python | mit | 3,168 |
#!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"1. Top of file",
["BRAILLE LINE: 'Foo'",
" VISIBLE: 'Foo', cursor=1",
"SPEECH OUTPUT: 'Foo'",
"SPEECH OUTPUT: 'link.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>Tab"))
sequence.append(utils.AssertPresentationAction(
"2. Shift Tab",
["BRAILLE LINE: ''",
" VISIBLE: '', cursor=1",
"SPEECH OUTPUT: 'document frame'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"3. Tab",
["BRAILLE LINE: 'Foo'",
" VISIBLE: 'Foo', cursor=1",
"BRAILLE LINE: 'Foo'",
" VISIBLE: 'Foo', cursor=1",
"SPEECH OUTPUT: 'Foo link.'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| chrys87/orca-beep | test/keystrokes/firefox/focus_tracking_link_child_of_body.py | Python | lgpl-2.1 | 1,278 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf.urls import patterns, url
urlpatterns = patterns('geonode.geoserver.views',
url(r'^rest/stores/(?P<store_type>\w+)/$', 'stores', name="stores"),
(r'^rest/styles', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/styles',
downstream_path='rest/styles')),
(r'^rest/layers', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/layers',
downstream_path='rest/layers')),
(r'^rest/sldservice', 'geoserver_rest_proxy', dict(proxy_path='/gs/rest/sldservice',
downstream_path='rest/sldservice')),
url(r'^updatelayers/$', 'updatelayers', name="updatelayers"),
url(r'^(?P<layername>[^/]*)/style$', 'layer_style', name="layer_style"),
url(r'^(?P<layername>[^/]*)/style/upload$', 'layer_style_upload', name='layer_style_upload'),
url(r'^(?P<layername>[^/]*)/style/manage$', 'layer_style_manage', name='layer_style_manage'),
url(r'^(?P<layername>[^/]*)/edit-check?$', 'feature_edit_check', name="feature_edit_check"),
url(r'^acls/?$', 'layer_acls', name='layer_acls'),
url(r'^resolve_user/?$', 'resolve_user', name='layer_resolve_user'),
url(r'^download$', 'layer_batch_download', name='layer_batch_download'),
)
| Phil-LiDAR2-Geonode/pl2-geonode | geonode/geoserver/urls.py | Python | gpl-3.0 | 2,446 |
from mrjob.job import MRJob
import sys
class GlobalState(MRJob):
    def __init__(self, *args, **kwargs):
        # Forward command-line arguments to MRJob so option parsing still works.
        super(GlobalState, self).__init__(*args, **kwargs)
        self.GlobalList = []
    def mapper_init(self):
        print("cat", file=sys.stderr)
    def mapper(self, _, lines):
        # Accumulate matching slices; this list is local to each mapper task.
        if "wi" in lines:
            self.GlobalList.append(lines[2:8])
        yield (lines, 1)
    def reducer(self, key, values):
        pass
    def reducer_final(self):
        # Runs in the reducer task, which has its own fresh GlobalList:
        # state appended in the mappers does not survive into the reduce phase.
        yield (self.GlobalList, 1)
if __name__ == "__main__":
    GlobalState.run()
| JasonSanchez/w261 | week5/GlobalState.py | Python | mit | 508 |
"""
Common functionality used in save() methods and the like lives here.
This is basically anything that can be abstracted away for readability
like hashing or getting a media path, etc...
"""
from django.conf import settings
from django.utils import timezone
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.loading import get_model
from contextlib import contextmanager
from threading import local
from .exceptions import UnknownUserError
from docutils.core import publish_parts
from importlib import import_module
from collections import defaultdict
from nltk import stem
import os
import uuid
import simhash
def get_audio_path(instance, filename):
"""
    Receives an instance of the Audio model and returns an appropriate
filename/filepath (the hash_id + audio_file hash) for the save()
function of the FileField to use.
"""
return os.path.join(
settings.MEDIA_ROOT,
instance.sentence.hash_id + '-' + instance.hash_id + '.mp3'
)
def now():
return timezone.now()
def uuid4():
return uuid.uuid4().hex
def sim_hash(text):
return simhash.Simhash(text).value
def truncated_sim_hash(text):
return int('%.19s' % (sim_hash(text)))
thread_local_storage = local()
@contextmanager
def work_as(user):
if not hasattr(thread_local_storage, 'temp_user_list'):
thread_local_storage.temp_user_list = []
thread_local_storage.temp_user_list.append(user)
yield
thread_local_storage.temp_user_list.pop()
def get_user():
if not hasattr(thread_local_storage, 'temp_user_list') or \
not thread_local_storage.temp_user_list:
raise UnknownUserError(
"Please wrap this in a work_as context manager and provide a \
User object."
)
return thread_local_storage.temp_user_list[-1]
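# Usage sketch (hypothetical; `user` is any User instance responsible for the
# change being made):
#
#     with work_as(user):
#         sentence.save()  # presave hooks may call get_user() to attribute the edit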
def sentence_presave(sent):
if not sent.id:
sent.hash_id = uuid4()
sent.owner = sent.added_by
if sent.text:
sent.length = len(sent.text)
sent.sim_hash = truncated_sim_hash(sent.text)
return sent
def correction_presave(corr):
if not corr.id and corr.sentence:
corr.hash_id = uuid4()
sent = corr.sentence
sent.has_correction = True
sent.save(update_fields=['has_correction'])
return corr
def tag_presave(tag):
if not tag.id:
tag.hash_id = uuid4()
return tag
def rest(text):
return publish_parts(text, writer_name='html')['body']
def markup_to_html(text, markup):
from .choices import MARKUPS_SUPPORTED
if markup in MARKUPS_SUPPORTED:
converter = MARKUPS_SUPPORTED[markup][1]
return converter(text)
return text
class ClassProperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
classproperty = ClassProperty
# sample utility class for stemming, tokenizing, and removing stopwords
STEMMERS = {
'eng': stem.snowball.EnglishStemmer()
}
STEMMERS = getattr(settings, 'HAYSTACK_STEMMERS', STEMMERS)
TOKENIZERS = getattr(settings, 'HAYSTACK_TOKENIZERS', {})
STOP_WORDS = getattr(settings, 'HAYSTACK_STOP_WORDS', {})
class Stemmer(object):
def __init__(self, lang=None):
self.lang = lang
self.stemmer = STEMMERS.get(lang, None)
self.tokenizer = TOKENIZERS.get(lang, None)
self.stop_words = set(STOP_WORDS.get(lang, set()))
def stem(self, text, lang):
lang = lang or self.lang
stemmer = STEMMERS.get(lang, None) or self.stemmer
if not stemmer: return ''
stemmed_text = []
for token in self.tokenize(text):
if token not in self.stop_words:
token = stemmer.stem(token)
stemmed_text.append(token)
stemmed_text = ' '.join(stemmed_text)
return stemmed_text
def tokenize(self, text):
tokenizer = self.tokenizer if self.tokenizer else lambda s: s.split()
for token in tokenizer(text):
yield token
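# Example (sketch; output is approximate and depends on the configured
# tokenizer/stop words -- by default only an English snowball stemmer is set up):
#
#     Stemmer('eng').stem('running dogs were barking', 'eng')
#     # -> 'run dog were bark'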
stemmer = Stemmer()
def import_path(path):
module = '.'.join(path.split('.')[:-1])
cls = path.split('.')[-1]
module = import_module(module)
cls = getattr(module, cls)
return cls
graph_backend = import_path(settings.GRAPH_BACKEND)
def redraw_subgraph(links=[], unlinks=[]):
Sentence = get_model('pytoeba', 'Sentence')
Link = get_model('pytoeba', 'Link')
link_qs = Link.objects.none()
for link in links:
link_qs = link_qs | \
Link.objects.filter(side1_id=link[0]) | \
Link.objects.filter(
side1__in=Sentence.objects.filter(
side1_set__in=Link.objects.filter(side1_id=link[0])
),
level=1
) | \
Link.objects.filter(side2_id=link[0]) | \
Link.objects.filter(
side2__in=Sentence.objects.filter(
side2_set__in=Link.objects.filter(side2_id=link[0])
),
level=1
) | \
Link.objects.filter(side1_id=link[1]) | \
Link.objects.filter(
side1__in=Sentence.objects.filter(
side1_set__in=Link.objects.filter(side1_id=link[1])
),
level=1
) | \
Link.objects.filter(side2_id=link[1]) | \
Link.objects.filter(
side2__in=Sentence.objects.filter(
side2_set__in=Link.objects.filter(side2_id=link[1])
),
level=1
)
unlink_qs = Link.objects.none()
for unlink in unlinks:
unlink_qs = unlink_qs | \
Link.objects.filter(side1_id=unlink[0], side2_id=unlink[1], level=1)
subgraph_links = list(link_qs | unlink_qs)
subgraph = graph_backend(subgraph_links)
for link in links:
subgraph.add_edge(link[0], link[1])
for unlink in unlinks:
        subgraph.remove_edge(unlink[0], unlink[1])
relinked_subgraph_links = subgraph.get_recomputed_links(
created=True, updated=True, deleted=True
)
created = relinked_subgraph_links['created']
updated = relinked_subgraph_links['updated']
deleted = relinked_subgraph_links['deleted']
if created:
created = bulk_create(created)
if updated:
bulk_update(updated, update_fields=['level'], case_fields=['side1_id', 'side2_id'])
if deleted:
bulk_delete(deleted)
def fix_pythonism(value):
if not value:
if value is None:
value = 'NULL'
if isinstance(value, unicode):
value = '\'\''
if isinstance(value, bool):
value = unicode(value).upper()
return value
def bulk_create(objs, using='default'):
ref_obj = objs[0]
meta = ref_obj._meta
model = meta.model
fields = meta.fields
return model._base_manager._insert(objs, fields=fields, using=using)
def bulk_delete(objs, case_field='id', using='default', as_sql=False):
connection = connections[using]
# using objects not from the same table will break everything
ref_obj = objs[0]
meta = ref_obj._meta
case_field = [f for f in meta.fields if f.attname == case_field][0]
cf_vals = []
for obj in objs:
val = getattr(obj, case_field.attname)
val = case_field.get_db_prep_save(val, connection)
val = fix_pythonism(val)
cf_vals.append(val)
sql = []
params = []
sql.append('DELETE FROM %s ' % meta.db_table)
    sql.append('WHERE %s IN (%%s)' % case_field.attname)
params.append(', '.join(cf_vals))
sql = ''.join(sql)
if as_sql:
return sql, params
connection.cursor().execute(sql, params)
def bulk_update(objs, case_field='id', using='default', update_fields=[],
case_fields=[], as_sql=False):
connection = connections[using]
# using objects not from the same table will break everything
ref_obj = objs[0]
meta = ref_obj._meta
fields = meta.fields
update_fields = update_fields or meta.get_all_field_names()
# get actual field object for case_field by name
case_field = [f for f in fields if f.attname == case_field][0]
if case_fields:
case_fields = [f for f in fields if f.attname in case_fields]
# filter out auto fields and fields not specified in update_fields
fields = [
f
for f in fields
if not isinstance(f, AutoField) and f.attname in update_fields
]
# initialize cases
cases = {}
for field in fields:
cases[field.column] = defaultdict(dict)
# populate a case per field with case_field/field value pairs
# the defaultdict ensures unique values per case per case_field
for obj in objs:
# get raw values appropriate for the db backend
if not case_fields:
cf_value = case_field.get_db_prep_save(
getattr(obj, case_field.attname), connection
)
for field in fields:
f_value = field.get_db_prep_save(
getattr(obj, field.attname), connection
)
f_value = fix_pythonism(f_value)
cases[field.column].update({cf_value: f_value})
else:
cf_values = []
for field in case_fields:
cf_value = field.get_db_prep_save(
getattr(obj, field.attname), connection
)
cf_value = fix_pythonism(cf_value)
cf_values.append(cf_value)
cf_values = tuple(cf_values)
for field in fields:
f_value = field.get_db_prep_save(
getattr(obj, field.attname), connection
)
f_value = fix_pythonism(f_value)
cases[field.column].update({cf_values: f_value})
# build sql query
indent = ' '
newline = '\n'
sql = []
params = []
sql.append('UPDATE %s' % meta.db_table)
sql.append(newline)
sql.append(indent)
sql.append('SET')
sql.append(newline)
cf_vals = set()
for case, values in cases.iteritems():
sql.append(indent * 2)
if not case_fields:
sql.append('%s = CASE %s' % (case, case_field.attname))
else:
sql.append('%s = CASE' % case)
sql.append(newline)
for cf_value, field_value in values.items():
cf_vals.add(cf_value)
if not case_fields:
sql.append(indent * 3)
sql.append('WHEN %s THEN %s')
sql.append(newline)
params.append(cf_value)
params.append(field_value)
else:
cond = []
for cf in case_fields:
cond.append('%s = %%s' % cf.attname)
cond = ' AND '.join(cond)
sql.append(indent * 3)
sql.append('WHEN (%s) THEN %%s' % cond)
sql.append(newline)
for cf_v in cf_value:
params.append(cf_v)
params.append(field_value)
sql.append(indent * 2)
sql.append('END,')
sql.append(newline)
sql.pop()
sql.pop()
sql.append('END')
sql.append(newline)
if not case_fields:
sql.append('WHERE %s IN (%%s)' % case_field.attname)
params.append(', '.join(str(v) for v in cf_vals))
else:
if connection.vendor != 'sqlite': # change this connection.vendor
cfs = ', '.join([cf.attname for cf in case_fields])
sql.append('WHERE (%s) IN (%%s)' % cfs)
cf_param = []
for cf_v in cf_vals:
cf_param.append('(' + ', '.join(str(v) for v in cf_v) + ')')
params.append(', '.join(cf_param))
else:
comps = []
for cf in case_fields:
comps.append('%s = %%s' % cf.attname)
comps = ' AND '.join(comps)
cond = []
for cf_v in cf_vals:
cond.append('(%s)' % comps)
for v in cf_v:
params.append(str(v))
cond = ' OR '.join(cond)
sql.append('WHERE %s' % cond)
sql = ''.join(sql)
if as_sql:
return sql, params
connection.cursor().execute(sql, params)
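# Usage sketch for bulk_update (model and field names are illustrative; assumes
# the objects were loaded from the same table and modified in memory):
#
#     sents = list(Sentence.objects.filter(lang='eng')[:100])
#     for s in sents:
#         s.text = s.text.strip()
#     bulk_update(sents, update_fields=['text'])
#
# This issues a single UPDATE ... SET text = CASE id WHEN ... END WHERE id IN (...)
# statement instead of one query per row.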
def bulk_upsert(objs, case_field='id', using='default', _return=True):
# get some basic info about the objects, they all have to be for the same
# table or well, things can go horribly wrong.
ref_obj = objs[0]
meta = ref_obj._meta
model = meta.model
fields = meta.fields
# handle common case first, case_field is id
# missing ids will always be inserts
if case_field == 'id':
all_objs = set(objs)
for_insert = set(obj for obj in objs if not obj.id)
for_update = all_objs - for_insert
bulk_update(for_update, case_field, using)
    # bypass the forced batch_size in django's bulk_create
for_insert = bulk_create(for_insert, using=using)
return list(for_insert), list(for_update)
# handle the more generic case
    # we don't know if some of these values in case_field (which must be
    # unique, or the upsert will misbehave) are actually already there
# so we do a select to find out
cf_vals = [getattr(obj, case_field) for obj in objs]
filter_kwargs = {case_field+'__in': cf_vals}
all_objs = set(objs)
for_update = set(model.objects.filter(**filter_kwargs))
for_insert = all_objs - for_update
bulk_update(for_update, case_field, using)
    # bypass the forced batch_size in django's bulk_create
for_insert = model._base_manager._insert(for_insert, fields=fields, using=using)
if _return:
return list(for_insert), list(for_update)
| loolmeh/pytoeba | pytoeba/utils.py | Python | mit | 13,918 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
# Sentinel to avoid the situation where `None` *is* the default value.
NoDefault = collections.namedtuple('NoDefault', [])()
class ValueNotFoundException(Exception):
"""Raised when a value cannot be found. Used for control-flow only."""
class Handler:
def __init__(self, name, prefix='', default=NoDefault, description=None):
# e.g. my_option_name
self.name = name
# e.g. p_my_option_name
self.click_name = prefix + name
self.default = default
self.description = description
self.missing = []
@property
def cli_name(self):
import q2cli.util
# e.g. p-my-option-name
return q2cli.util.to_cli_name(self.click_name)
def get_click_options(self):
"""Should yield 1 or more click.Options"""
raise NotImplementedError()
def get_value(self, arguments, fallback=None):
"""Should find 1 or more arguments and convert to a single API value"""
raise NotImplementedError()
def _locate_value(self, arguments, fallback, multiple=False):
"""Default lookup procedure to find a click.Option provided by user"""
# TODO revisit this interaction between _locate_value, single vs.
# multiple options, and fallbacks. Perhaps handlers should always
# use tuples to store values, even for single options, in order to
# normalize single-vs-multiple option handling. Probably not worth
# revisiting until there are more unit + integration tests of q2cli
# since there's the potential to break things.
# Is it in args?
v = arguments[self.click_name]
missing_value = () if multiple else None
if v != missing_value:
return v
# Does our fallback know about it?
if fallback is not None:
try:
fallback_value = fallback(self.name, self.cli_name)
except ValueNotFoundException:
pass
else:
# TODO fallbacks don't know whether they're handling a single
# vs. multiple option, so the current expectation is that
# fallbacks will always return a single value. Revisit this
# expectation in the future; perhaps fallbacks should be aware
# of single-vs-multiple options, or perhaps they could always
# return a tuple.
if multiple:
fallback_value = (fallback_value,)
return fallback_value
# Do we have a default?
if self.default is not NoDefault:
return self.default
# Give up
self.missing.append(self.cli_name)
raise ValueNotFoundException()
def _parse_boolean(self, string):
"""Parse string representing a boolean into Python bool type.
Supported values match `configparser.ConfigParser.getboolean`.
"""
trues = ['1', 'yes', 'true', 'on']
falses = ['0', 'no', 'false', 'off']
string_lower = string.lower()
if string_lower in trues:
return True
elif string_lower in falses:
return False
else:
import itertools
import click
msg = (
"Error: unrecognized value for --%s flag: %s\n"
"Supported values (case-insensitive): %s" %
(self.cli_name, string,
', '.join(itertools.chain(trues, falses)))
)
click.secho(msg, err=True, fg='red', bold=True)
ctx = click.get_current_context()
ctx.exit(1)
def _add_description(self, option, requirement):
def pretty_cat(a, b, space=1):
if a:
return a + (' ' * space) + b
return b
if self.description:
option.help = pretty_cat(option.help, self.description)
option.help = pretty_cat(option.help, requirement, space=2)
return option
class VerboseHandler(Handler):
"""Handler for verbose output (--verbose flag)."""
def __init__(self):
super().__init__('verbose', default=False)
def get_click_options(self):
import q2cli
# `is_flag` will set the default to `False`, but `self._locate_value`
# needs to distinguish between the presence or absence of the flag
# provided by the user.
yield q2cli.Option(
['--' + self.cli_name], is_flag=True, default=None,
help='Display verbose output to stdout and/or stderr during '
'execution of this action. [default: %s]' % self.default)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
# Value may have been specified in --cmd-config (or another source in
# the future). If we don't have a bool type yet, attempt to interpret a
# string representing a boolean.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
class QuietHandler(Handler):
"""Handler for quiet output (--quiet flag)."""
def __init__(self):
super().__init__('quiet', default=False)
def get_click_options(self):
import q2cli
# `is_flag` will set the default to `False`, but `self._locate_value`
# needs to distinguish between the presence or absence of the flag
# provided by the user.
yield q2cli.Option(
['--' + self.cli_name], is_flag=True, default=None,
help='Silence output if execution is successful '
'(silence is golden). [default: %s]' % self.default)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
# Value may have been specified in --cmd-config (or another source in
# the future). If we don't have a bool type yet, attempt to interpret a
# string representing a boolean.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
class OutputDirHandler(Handler):
"""Meta handler which returns a fallback function as its value."""
def __init__(self):
super().__init__('output_dir')
def get_click_options(self):
import click
import q2cli
yield q2cli.Option(
['--' + self.cli_name],
type=click.Path(exists=False, dir_okay=True, file_okay=False,
writable=True),
help='Output unspecified results to a directory')
def get_value(self, arguments, fallback=None):
import os
import os.path
import click
try:
path = self._locate_value(arguments, fallback=fallback)
# TODO: do we want a --force like flag?
if os.path.exists(path):
click.secho("Error: --%s directory already exists, won't "
"overwrite." % self.cli_name, err=True, fg='red',
bold=True)
ctx = click.get_current_context()
ctx.exit(1)
os.makedirs(path)
def fallback_(name, cli_name):
return os.path.join(path, name)
return fallback_
except ValueNotFoundException:
# Always fail to find a value as this handler doesn't exist.
def fail(*_):
raise ValueNotFoundException()
return fail
class CommandConfigHandler(Handler):
"""Meta handler which returns a fallback function as its value."""
def __init__(self, cli_plugin, cli_action):
self.cli_plugin = cli_plugin
self.cli_action = cli_action
super().__init__('cmd_config')
def get_click_options(self):
import click
import q2cli
yield q2cli.Option(
['--' + self.cli_name],
type=click.Path(exists=True, dir_okay=False, file_okay=True,
readable=True),
help='Use config file for command options')
def get_value(self, arguments, fallback=None):
import configparser
import warnings
try:
path = self._locate_value(arguments, fallback=fallback)
config = configparser.ConfigParser()
config.read(path)
try:
config_section = config['.'.join([
self.cli_plugin, self.cli_action
])]
except KeyError:
warnings.warn("Config file does not contain a section"
" for %s"
% '.'.join([self.cli_plugin, self.cli_action]),
UserWarning)
raise ValueNotFoundException()
def fallback_(name, cli_name):
try:
return config_section[cli_name]
except KeyError:
raise ValueNotFoundException()
return fallback_
except ValueNotFoundException:
# Always fail to find a value as this handler doesn't exist.
def fail(*_):
raise ValueNotFoundException()
return fail
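# Example --cmd-config file layout read by CommandConfigHandler (sketch; section
# names follow "<plugin>.<action>" and keys use each option's cli-name):
#
#     [feature-table.summarize]
#     i-table = table.qza
#     o-visualization = summary.qzv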
class GeneratedHandler(Handler):
def __init__(self, name, repr, ast, default=NoDefault, description=None):
super().__init__(name, prefix=self.prefix, default=default,
description=description)
self.repr = repr
self.ast = ast
class CollectionHandler(GeneratedHandler):
view_map = {
'List': list,
'Set': set
}
def __init__(self, inner_handler, **kwargs):
self.inner_handler = inner_handler
# inner_handler needs to be set first so the prefix lookup works
super().__init__(**kwargs)
self.view_type = self.view_map[self.ast['name']]
@property
def prefix(self):
return self.inner_handler.prefix
def get_click_options(self):
import q2cli.core
for option in self.inner_handler.get_click_options():
option.multiple = True
# validation happens on a callback for q2cli.core.Option, so unset
# it because we need standard click behavior for multi-options
# without this, the result of not-passing a value is `None` instead
# of `()` which confuses ._locate_value
option.callback = None
option.type = q2cli.core.MultipleType(option.type)
yield option
def get_value(self, arguments, fallback=None):
args = self._locate_value(arguments, fallback, multiple=True)
if args is None:
return None
decoded_values = []
for arg in args:
# Use an empty dict because we don't need the inner handler to
# look for anything; that's our job. We just need it to decode
# whatever it was we found.
empty = collections.defaultdict(lambda: None)
decoded = self.inner_handler.get_value(empty,
fallback=lambda *_: arg)
decoded_values.append(decoded)
value = self.view_type(decoded_values)
if len(value) != len(decoded_values):
self._error_with_duplicate_in_set(decoded_values)
return value
def _error_with_duplicate_in_set(self, elements):
import click
import collections
counter = collections.Counter(elements)
dups = {name for name, count in counter.items() if count > 1}
ctx = click.get_current_context()
click.echo(ctx.get_usage() + '\n', err=True)
click.secho("Error: Option --%s was given these values: %r more than "
"one time, values passed should be unique."
% (self.cli_name, dups), err=True, fg='red', bold=True)
ctx.exit(1)
class ArtifactHandler(GeneratedHandler):
prefix = 'i_'
def get_click_options(self):
import q2cli
import q2cli.core
type = q2cli.core.ResultPath(repr=self.repr, exists=True,
file_okay=True, dir_okay=False,
readable=True)
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option(['--' + self.cli_name], type=type, help="")
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
import qiime2
path = self._locate_value(arguments, fallback)
if path is None:
return None
else:
return qiime2.Artifact.load(path)
class ResultHandler(GeneratedHandler):
prefix = 'o_'
def get_click_options(self):
        import q2cli
        import q2cli.core
type = q2cli.core.ResultPath(self.repr, exists=False, file_okay=True,
dir_okay=False, writable=True)
option = q2cli.Option(['--' + self.cli_name], type=type, help="")
yield self._add_description(
option, '[required if not passing --output-dir]')
def get_value(self, arguments, fallback=None):
return self._locate_value(arguments, fallback)
def parameter_handler_factory(name, repr, ast, default=NoDefault,
description=None):
if ast['name'] == 'Metadata':
return MetadataHandler(name, default=default, description=description)
elif ast['name'] == 'MetadataCategory':
return MetadataCategoryHandler(name, default=default,
description=description)
else:
return RegularParameterHandler(name, repr, ast, default=default,
description=description)
class MetadataHandler(Handler):
def __init__(self, name, default=NoDefault, description=None):
if default is not NoDefault and default is not None:
raise TypeError(
"The only supported default value for Metadata is `None`. "
"Found this default value: %r" % (default,))
super().__init__(name, prefix='m_', default=default,
description=description)
self.click_name += '_file'
def get_click_options(self):
import click
import q2cli
import q2cli.core
name = '--' + self.cli_name
type = click.Path(exists=True, file_okay=True, dir_okay=False,
readable=True)
type = q2cli.core.MultipleType(type)
help = ('Metadata file or artifact viewable as metadata. This '
'option may be supplied multiple times to merge metadata.')
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option([name], type=type, help=help, multiple=True)
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
import os
import qiime2
import q2cli.util
paths = self._locate_value(arguments, fallback, multiple=True)
if paths is None:
return paths
metadata = []
for path in paths:
try:
# check to see if path is an artifact
artifact = qiime2.Artifact.load(path)
except Exception:
try:
metadata.append(qiime2.Metadata.load(path))
except Exception as e:
header = ("There was an issue with loading the file %s as "
"metadata:" % path)
with open(os.devnull, 'w') as dev_null:
q2cli.util.exit_with_error(
e, header=header, file=dev_null,
suppress_footer=True)
else:
try:
metadata.append(qiime2.Metadata.from_artifact(artifact))
except Exception as e:
header = ("There was an issue with viewing the artifact "
"%s as metadata:" % path)
with open(os.devnull, 'w') as dev_null:
q2cli.util.exit_with_error(
e, header=header, file=dev_null,
suppress_footer=True)
return metadata[0].merge(*metadata[1:])
class MetadataCategoryHandler(Handler):
def __init__(self, name, default=NoDefault, description=None):
if default is not NoDefault and default is not None:
raise TypeError(
"The only supported default value for MetadataCategory is "
"`None`. Found this default value: %r" % (default,))
super().__init__(name, prefix='m_', default=default,
description=description)
self.click_name += '_category'
# Not passing `description` to metadata handler because `description`
# applies to the metadata category (`self`).
self.metadata_handler = MetadataHandler(name, default=default)
def get_click_options(self):
import q2cli
name = '--' + self.cli_name
type = str
help = ('Category from metadata file or artifact viewable as '
'metadata.')
if self.default is None:
requirement = '[optional]'
else:
requirement = '[required]'
option = q2cli.Option([name], type=type, help=help)
yield from self.metadata_handler.get_click_options()
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
# Attempt to find all options before erroring so that all handlers'
# missing options can be displayed to the user.
try:
metadata_value = self.metadata_handler.get_value(arguments,
fallback=fallback)
except ValueNotFoundException:
pass
try:
category_value = self._locate_value(arguments, fallback)
except ValueNotFoundException:
pass
missing = self.metadata_handler.missing + self.missing
if missing:
self.missing = missing
raise ValueNotFoundException()
# If metadata category is optional, there is a chance for metadata to
# be provided without a metadata category, or vice versa.
if metadata_value is None and category_value is not None:
self.missing.append(self.metadata_handler.cli_name)
raise ValueNotFoundException()
elif metadata_value is not None and category_value is None:
self.missing.append(self.cli_name)
raise ValueNotFoundException()
if metadata_value is None and category_value is None:
return None
else:
return metadata_value.get_category(category_value)
class RegularParameterHandler(GeneratedHandler):
prefix = 'p_'
def __init__(self, name, repr, ast, default=NoDefault, description=None):
import q2cli.util
super().__init__(name, repr, ast, default=default,
description=description)
# TODO: just create custom click.ParamType to avoid this silliness
if ast['type'] == 'collection':
ast, = ast['fields']
self.type = q2cli.util.convert_primitive(ast)
def get_click_options(self):
import q2cli
import q2cli.util
if self.type is bool:
no_name = self.prefix + 'no_' + self.name
cli_no_name = q2cli.util.to_cli_name(no_name)
name = '--' + self.cli_name + '/--' + cli_no_name
# click.Option type is determined implicitly for flags with
# secondary options, and explicitly passing type=bool results in a
# TypeError, so we pass type=None (the default).
option_type = None
else:
name = '--' + self.cli_name
option_type = self.type
if self.default is NoDefault:
requirement = '[required]'
elif self.default is None:
requirement = '[optional]'
else:
requirement = '[default: %s]' % self.default
# Pass `default=None` and `show_default=False` to `click.Option`
# because the handlers are responsible for resolving missing values and
# supplying defaults. Telling Click about the default value here makes
# it impossible to determine whether the user supplied or omitted a
# value once the handlers are invoked.
option = q2cli.Option([name], type=option_type, default=None,
show_default=False, help='')
yield self._add_description(option, requirement)
def get_value(self, arguments, fallback=None):
value = self._locate_value(arguments, fallback)
if value is None:
return None
elif self.type is bool:
# TODO: should we defer to the Bool primitive? It only allows
# 'true' and 'false'.
if type(value) is not bool:
value = self._parse_boolean(value)
return value
else:
import qiime2.sdk
primitive = qiime2.sdk.parse_type(self.repr, expect='primitive')
# TODO/HACK: the repr is the primitive used, but since there's a
# collection handler managing the set/list this get_value should
# handle only the pieces. This is super gross, but would be
            # unnecessary if click.ParamTypes were implemented for each
# kind of QIIME 2 input.
if self.ast['type'] == 'collection':
primitive, = primitive.fields
return primitive.decode(value)
| gregcaporaso/q2cli | q2cli/handlers.py | Python | bsd-3-clause | 22,305 |
import _plotly_utils.basevalidators
class DeltaValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="delta", parent_name="indicator", **kwargs):
super(DeltaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Delta"),
data_docs=kwargs.pop(
"data_docs",
"""
decreasing
:class:`plotly.graph_objects.indicator.delta.De
creasing` instance or dict with compatible
properties
font
Set the font used to display the delta
increasing
:class:`plotly.graph_objects.indicator.delta.In
creasing` instance or dict with compatible
properties
position
Sets the position of delta with respect to the
number.
reference
Sets the reference value to compute the delta.
By default, it is set to the current value.
relative
Show relative change
valueformat
Sets the value formatting rule using d3
formatting mini-language which is similar to
those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/indicator/_delta.py | Python | mit | 1,527 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
import pytest
from pymatgen.analysis.functional_groups import FunctionalGroupExtractor
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.analysis.local_env import OpenBabelNN
from pymatgen.core.structure import Molecule
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "functional_groups")
pytest.importorskip("openbabel", reason="OpenBabel not installed")
pytest.importorskip("networkx", reason="NetworkX not installed")
__author__ = "Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Evan Spotte-Smith"
__email__ = "ewcspottesmith@lbl.gov"
__status__ = "Beta"
__date__ = "July 2018"
__credit__ = "Peiyuan Yu"
class FunctionalGroupExtractorTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.file = os.path.join(test_dir, "func_group_test.mol")
self.mol = Molecule.from_file(self.file)
self.strat = OpenBabelNN()
self.mg = MoleculeGraph.with_local_env_strategy(self.mol, self.strat)
self.extractor = FunctionalGroupExtractor(self.mg)
def tearDown(self):
warnings.simplefilter("default")
del self.extractor
del self.mg
del self.strat
del self.mol
del self.file
def test_init(self):
# Ensure that instantiation is equivalent for all valid input types
extractor_str = FunctionalGroupExtractor(self.file)
extractor_mol = FunctionalGroupExtractor(self.mol)
extractor_mg = self.extractor
self.assertEqual(extractor_str.molgraph, extractor_mol.molgraph)
self.assertEqual(extractor_str.molgraph, extractor_mg.molgraph)
self.assertEqual(extractor_str.species, extractor_mol.species)
self.assertEqual(extractor_str.species, extractor_mg.species)
# Test optimization
file_no_h = os.path.join(test_dir, "func_group_test_no_h.mol")
extractor_no_h = FunctionalGroupExtractor(file_no_h, optimize=True)
self.assertEqual(len(extractor_no_h.molecule), len(extractor_mol.molecule))
self.assertEqual(extractor_no_h.species, extractor_mol.species)
def test_get_heteroatoms(self):
heteroatoms = self.extractor.get_heteroatoms()
hetero_species = [self.extractor.species[x] for x in heteroatoms]
self.assertEqual(len(heteroatoms), 3)
self.assertEqual(sorted(hetero_species), ["N", "O", "O"])
# Test with limitation
hetero_no_o = self.extractor.get_heteroatoms(elements=["N"])
self.assertEqual(len(hetero_no_o), 1)
def test_get_special_carbon(self):
special_cs = self.extractor.get_special_carbon()
self.assertEqual(len(special_cs), 4)
# Test with limitation
special_cs_no_o = self.extractor.get_special_carbon(elements=["N"])
self.assertEqual(len(special_cs_no_o), 2)
def test_link_marked_atoms(self):
heteroatoms = self.extractor.get_heteroatoms()
special_cs = self.extractor.get_special_carbon()
link = self.extractor.link_marked_atoms(heteroatoms.union(special_cs))
self.assertEqual(len(link), 1)
self.assertEqual(len(link[0]), 9)
# Exclude Oxygen-related functional groups
heteroatoms_no_o = self.extractor.get_heteroatoms(elements=["N"])
special_cs_no_o = self.extractor.get_special_carbon(elements=["N"])
all_marked = heteroatoms_no_o.union(special_cs_no_o)
link_no_o = self.extractor.link_marked_atoms(all_marked)
self.assertEqual(len(link_no_o), 2)
def test_get_basic_functional_groups(self):
basics = self.extractor.get_basic_functional_groups()
# Molecule has one methyl group which will be caught.
self.assertEqual(len(basics), 1)
self.assertEqual(len(basics[0]), 4)
basics_no_methyl = self.extractor.get_basic_functional_groups(func_groups=["phenyl"])
self.assertEqual(len(basics_no_methyl), 0)
def test_get_all_functional_groups(self):
heteroatoms = self.extractor.get_heteroatoms()
special_cs = self.extractor.get_special_carbon()
link = self.extractor.link_marked_atoms(heteroatoms.union(special_cs))
basics = self.extractor.get_basic_functional_groups()
all_func = self.extractor.get_all_functional_groups()
self.assertEqual(len(all_func), (len(link) + len(basics)))
self.assertEqual(sorted(all_func), sorted(link + basics))
def test_categorize_functional_groups(self):
all_func = self.extractor.get_all_functional_groups()
categorized = self.extractor.categorize_functional_groups(all_func)
self.assertTrue("O=C1C=CC(=O)[N]1" in categorized.keys())
self.assertTrue("[CH3]" in categorized.keys())
total_count = sum([c["count"] for c in categorized.values()])
self.assertEqual(total_count, 2)
if __name__ == "__main__":
unittest.main()
| gmatteo/pymatgen | pymatgen/analysis/tests/test_functional_groups.py | Python | mit | 5,094 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-06-08 11:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0002_auto_20160605_2337'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.ImageField(blank=True, max_length=255, null=True, upload_to='user_profile', verbose_name='Foto')),
('data_nascimento', models.DateField(verbose_name='Data Nascimento')),
('sexo', models.IntegerField(choices=[(1, 'Masculino'), (2, 'Feminino')], verbose_name='Sexo')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL, verbose_name='Usu\xe1rio')),
],
),
]
| nicolaszein/chat | chat/migrations/0003_userprofile.py | Python | bsd-3-clause | 1,161 |
from django import template
from django.core.urlresolvers import reverse
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter()
@stringfilter
def actionlogize(value):
"""
Converts an ActionLog-Actionkey into a translateable, human string
"""
dictionary = {
"DELETE_ACTIONLOG": "Deleted the action-Log.",
"APPLY": "Applied pending changes.",
"CREATE_USER": "Created a user.",
"UPDATE_USER": "Updated a user.",
"DELETE_USER": "Deleted a user.",
"CREATE_KEY": "Created a key.",
"UPDATE_KEY": "Updated a key.",
"DELETE_KEY_FROM_USER": "Deleted a key from a user.",
"CREATE_USERGROUP": "Created a usergroup.",
"UPDATE_USERGROUP": "Updated a usergroup.",
"DELETE_USERGROUP": "Deleted a usergroup.",
"ASSIGN_USERINGROUP": "Assigned a user to a usergroup.",
"UNASSIGN_USERINGROUP": "Remove a user from a usergroup.",
"ASSIGN_USERGROUPINHOSTGROUP": "Associated a a hostgroup with a "
"usergroup",
"UNASSIGN_USERGROUPINHOSTGROUP": "Removed the association of a "
"hostgroup with a usergroup.",
"CREATE_HOST": "Created a host.",
"UPDATE_HOST": "Updated a host.",
"DELETE_HOST": "Deleted a host.",
"CREATE_HOSTGROUP": "Created a hostgroup.",
"UPDATE_HOSTGROUP": "Updated a hostgroup.",
"DELETE_HOSTGROUP": "Deleted a hostgroup.",
"ASSIGN_HOSTINGROUP": "Assigned a host to a hostgroup.",
"UNASSIGN_HOSTINGROUP": "Removed a host from a hostgroup."
}
if value in dictionary:
return dictionary[value]
else:
return value
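# Template usage sketch (assumes action-log entries expose an `action` field):
#     {% load actionlog %}
#     {{ entry.action|actionlogize }}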
@register.tag()
def get_actionobject(parser, token):
try:
tag_name, action, object_id, object_id2 = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires the action-code "
"and both object-ids " %
token.contents.split()[0]
)
return GetActionObjectNode(action, object_id, object_id2)
class GetActionObjectNode(template.Node):
def __init__(self, action, object_id, object_id2):
self.action = template.Variable(action)
self.object_id = template.Variable(object_id)
self.object_id2 = template.Variable(object_id2)
def render(self, context):
try:
action = self.action.resolve(context)
except template.VariableDoesNotExist:
action = ""
try:
object_id = self.object_id.resolve(context)
except template.VariableDoesNotExist:
object_id = ""
try:
object_id2 = self.object_id2.resolve(context)
except template.VariableDoesNotExist:
object_id2 = ""
dictionary = {
"CREATE_USER": {
"view": "users_edit",
"params": {
"pk": object_id
}
},
"UPDATE_USER": {
"view": "users_edit",
"params": {
"pk": object_id
}
},
"CREATE_KEY": {
"view": "users_keys_edit",
"params": {
"pk": object_id,
"user": object_id2
}
},
"UPDATE_KEY": {
"view": "users_keys_edit",
"params": {
"pk": object_id,
"user": object_id2
}
},
"CREATE_USERGROUP": {
"view": "usergroups_edit",
"params": {
"pk": object_id
}
},
"UPDATE_USERGROUP": {
"view": "usergroups_edit",
"params": {
"pk": object_id
}
},
"ASSIGN_USERINGROUP": {
"view": "users_groups_list",
"params": {
"user": object_id
}
},
"UNASSIGN_USERINGROUP": {
"view": "users_groups_list",
"params": {
"user": object_id
}
},
"ASSIGN_USERGROUPINHOSTGROUP": {
"view": "usergroups_hostgroups_list",
"params": {
"usergroup": object_id
}
},
"UNASSIGN_USERGROUPINHOSTGROUP": {
"view": "usergroups_hostgroups_list",
"params": {
"usergroup": object_id
}
},
"CREATE_HOST": {
"view": "hosts_edit",
"params": {
"pk": object_id
}
},
"UPDATE_HOST": {
"view": "hosts_edit",
"params": {
"pk": object_id
}
},
"CREATE_HOSTGROUP": {
"view": "hostgroups_edit",
"params": {
"pk": object_id
}
},
"UPDATE_HOSTGROUP": {
"view": "hostgroups_edit",
"params": {
"pk": object_id
}
},
"ASSIGN_HOSTINGROUP": {
"view": "hosts_groups_list",
"params": {
"host": object_id
}
},
"UNASSIGN_HOSTINGROUP": {
"view": "hosts_groups_list",
"params": {
"host": object_id
}
}
}
if action in dictionary:
return reverse(
dictionary[action]["view"],
kwargs = dictionary[action]["params"]
)
else:
return ""
| vialink/skd | keys/templatetags/actionlog.py | Python | bsd-2-clause | 6,141 |
#!/usr/bin/env python
import numpy as np
import argparse
import os
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.estimate.estimate_auxiliary import get_optim_paras
from respy.python.shared.shared_auxiliary import dist_model_paras
from respy.python.shared.shared_auxiliary import get_est_info
from respy import estimate
from respy import RespyCls
def add_gradient_information(respy_obj):
""" This function adds information about the gradient to the information
files. It is not part of the estimation _modules as it breaks the design
and requires to carry additional attributes. This results in considerable
overhead, which appears justified at this point.
"""
model_paras, is_debug, paras_fixed, derivatives = \
dist_class_attributes(respy_obj, 'model_paras', 'is_debug',
'paras_fixed', 'derivatives')
# Auxiliary objects
coeffs_a, coeffs_b, coeffs_edu, coeffs_home, shocks_cholesky = \
dist_model_paras(model_paras, is_debug)
# Construct starting values
x_all_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
shocks_cholesky, 'all', paras_fixed, is_debug)
x_free_start = get_optim_paras(coeffs_a, coeffs_b, coeffs_edu, coeffs_home,
shocks_cholesky, 'free', paras_fixed, is_debug)
# Construct auxiliary information
num_free = len(x_free_start)
# The information about the gradient is simply added to the original
# information later. Note that the original file is read before the
# gradient evaluation. This is required as the information otherwise
# accounts for the multiple function evaluation during the gradient
# approximation scheme.
original_lines = open('est.respy.info', 'r').readlines()
fmt_ = '{0:<25}{1:>15}\n'
original_lines[-5] = fmt_.format(*[' Number of Steps', 0])
original_lines[-3] = fmt_.format(*[' Number of Evaluations', num_free])
# Approximate gradient by forward finite differences.
grad, ei = np.zeros((num_free,), float), np.zeros((26,), float)
dfunc_eps = derivatives[1]
# Making sure that the criterion is only evaluated at the relevant
# starting values.
respy_obj.unlock()
respy_obj.set_attr('maxfun', 0)
respy_obj.lock()
_, f0 = estimate(respy_obj)
for k, i in enumerate(np.where(np.logical_not(paras_fixed))[0].tolist()):
x_baseline = x_all_start.copy()
ei[i] = 1.0
d = dfunc_eps * ei
respy_obj.update_model_paras(x_baseline + d)
_, f1 = estimate(respy_obj)
        grad[k] = (f1 - f0) / d[i]
ei[i] = 0.0
    # convert to a plain list so the write-out loop below can pop values in order
    grad = grad.tolist()
norm = np.amax(np.abs(grad))
# Write out extended information
with open('est.respy.info', 'a') as out_file:
# Insert information about gradient
out_file.write('\n\n\n\n Gradient\n\n')
fmt_ = '{0:>15} {1:>15}\n\n'
out_file.write(fmt_.format(*['Identifier', 'Start']))
fmt_ = '{0:>15} {1:15.4f}\n'
# Iterate over all candidate values, but only write the free
# ones to file. This ensure that the identifiers line up.
for j in range(26):
is_fixed = paras_fixed[j]
if not is_fixed:
values = [j, grad.pop(0)]
out_file.write(fmt_.format(*values))
out_file.write('\n')
# Add value of infinity norm
values = ['Norm', norm]
out_file.write(fmt_.format(*values))
out_file.write('\n\n')
def dist_input_arguments(parser):
""" Check input for estimation script.
"""
# Parse arguments
args = parser.parse_args()
# Distribute arguments
init_file = args.init_file
gradient = args.gradient
resume = args.resume
single = args.single
# Check attributes
assert (single in [True, False])
assert (resume in [False, True])
assert (os.path.exists(init_file))
if gradient:
# The gradient information is only provided if a single function
# evaluation is requested.
assert single
if resume:
assert (os.path.exists('est.respy.info'))
# Finishing
return resume, single, init_file, gradient
def scripts_estimate(resume, single, init_file, gradient):
""" Wrapper for the estimation.
"""
# Read in baseline model specification.
respy_obj = RespyCls(init_file)
# Update parametrization of the model if resuming from a previous
# estimation run.
if resume:
respy_obj.update_model_paras(get_est_info()['paras_step'])
# Set maximum iteration count when only an evaluation of the criterion
# function is requested.
if single:
respy_obj.unlock()
respy_obj.set_attr('maxfun', 0)
respy_obj.lock()
# Optimize the criterion function.
estimate(respy_obj)
if gradient:
add_gradient_information(respy_obj)
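# Command-line sketch (flags as declared in the argparse setup below; direct
# module invocation shown for illustration):
#
#     $ python scripts_estimate.py --init_file model.respy.ini --single --gradient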
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Start of estimation run with the RESPY package.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--resume', action='store_true', dest='resume',
default=False, help='resume estimation run')
parser.add_argument('--single', action='store_true', dest='single',
default=False, help='single evaluation')
parser.add_argument('--init_file', action='store', dest='init_file',
default='model.respy.ini', help='initialization file')
parser.add_argument('--gradient', action='store_true', dest='gradient',
default=False, help='gradient information')
# Process command line arguments
args = dist_input_arguments(parser)
# Run estimation
scripts_estimate(*args)
| restudToolbox/package | respy/scripts/scripts_estimate.py | Python | mit | 5,808 |
from __future__ import absolute_import
from django.http import HttpResponse
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from rules.contrib.views import (
LoginRequiredMixin,
PermissionRequiredMixin,
objectgetter,
permission_required,
)
from .models import Book
class BookMixin(object):
def get_object(self):
return Book.objects.get(pk=self.kwargs["book_id"])
class BookMixinWithError(object):
def get_object(self):
raise AttributeError("get_object")
@permission_required("testapp.change_book", fn=objectgetter(Book, "book_id"))
def change_book(request, book_id):
return HttpResponse("OK")
class BookCreateView(
LoginRequiredMixin, PermissionRequiredMixin, BookMixin, CreateView
):
fields = ["title"]
template_name = "empty.html"
permission_required = "testapp.create_book"
class BookUpdateView(
LoginRequiredMixin, PermissionRequiredMixin, BookMixin, UpdateView
):
fields = ["title"]
template_name = "empty.html"
permission_required = "testapp.change_book"
class BookUpdateErrorView(
LoginRequiredMixin, PermissionRequiredMixin, BookMixinWithError, UpdateView
):
fields = ["title"]
template_name = "empty.html"
permission_required = "testapp.change_book"
@permission_required("testapp.delete_book", fn=objectgetter(Book, "book_id"))
def delete_book(request, book_id):
return HttpResponse("OK")
class BookDeleteView(
LoginRequiredMixin, PermissionRequiredMixin, BookMixin, DeleteView
):
template_name = "empty.html"
permission_required = "testapp.delete_book"
@permission_required(
"testapp.delete_book", fn=objectgetter(Book, "book_id"), raise_exception=True
)
def view_that_raises(request, book_id):
return HttpResponse("OK")
class ViewThatRaises(
LoginRequiredMixin, PermissionRequiredMixin, BookMixin, DeleteView
):
template_name = "empty.html"
raise_exception = True
permission_required = "testapp.delete_book"
@permission_required(
["testapp.change_book", "testapp.delete_book"], fn=objectgetter(Book, "book_id")
)
def view_with_permission_list(request, book_id):
return HttpResponse("OK")
class ViewWithPermissionList(
LoginRequiredMixin, PermissionRequiredMixin, BookMixin, DeleteView
):
template_name = "empty.html"
permission_required = ["testapp.change_book", "testapp.delete_book"]
@permission_required("testapp.delete_book", fn=objectgetter(Book, "book_id"))
def view_with_object(request, book_id):
return HttpResponse("OK")
| dfunckt/django-rules | tests/testapp/views.py | Python | mit | 2,551 |
"""
Functions handling ontology annotations.
"""
from __future__ import print_function, division
import sys
import re
import bioservices
import libsbml
def getResourceUris(item):
""" Get list of resource URIs for the given element.
qualifierType = libsbml.BIOLOGICAL_QUALIFIER,
biologicalQualifierType = libsbml.BQB_IS):
:param item: sbml object
:type item: SBase
:return: list of resource URIs
:rtype: list
"""
uris = []
for i in range(item.getNumCVTerms()):
term = item.getCVTerm(i)
for j in range(term.getNumResources()):
uris.append(term.getResourceURI(j))
return uris
def getChebiId(item):
""" Returns the ChEBI ID from element.
:param item: sbml object
:type item: SBase
:return: first chebi id in rdf annotations, None if no chebi annotation
:rtype: str
"""
uris = getResourceUris(item)
chebiMatches = (re.match('.*(CHEBI:\d+)', uri) for uri in uris)
chebiIds = [match.group(1) for match in chebiMatches if match]
if len(chebiIds) > 0:
return chebiIds[0]
else:
return None
def matchSpeciesChebi(s1, s2, logging=False):
""" Match two Chebi identifiers.
    If both species carry ChEBI annotations, returns a dictionary describing
    how the two are related in the ChEBI ontology (exact match, parent or child).
    :param s1: first species
    :type s1: libsbml.Species
    :param s2: second species
    :type s2: libsbml.Species
:param logging: log messages to console
:type logging: bool
:return: dictionary of chebi information, returns None if no match
:rtype: dict
"""
ch = bioservices.ChEBI()
ch1 = getChebiId(s1)
ch2 = getChebiId(s2)
if not ch1 or not ch2:
return None
if logging:
print('Comparing %s (%s) with %s (%s)' % (s1.getId(), ch1, s2.getId(), ch2))
try:
entry = ch.getCompleteEntity(ch1)
exact = []
if ch1 == ch2:
exact.append({'id': s2.getId()})
children = []
if hasattr(entry, 'OntologyChildren'):
for child in entry.OntologyChildren:
if child['chebiId'] == ch2:
children.append({
'id': s2.getId(),
'data': child
})
parents = []
if (hasattr(entry, 'OntologyParents')):
for parent in entry.OntologyParents:
if parent['chebiId'] == ch2:
parents.append({
'id': s2.getId(),
'data': parent
})
return {
'id': s1.getId(),
'chebi_name': entry.chebiAsciiName,
'exact': exact,
'children': children,
'parents': parents
}
except:
print("Unexpected error:", sys.exc_info()[0])
return None
def getMatchingSpecies(m1, m2, logging=False):
""" Returns a list of species with matching annotations URIs for two models
:param m1: first SBML model
:type m1: libsbml.Model
:param m2: second SBML model
:type m2: libsbml.Model
:param logging: log info
:type logging: bool
:return: returns list of chebi annotation information for matching species
:rtype: list
"""
if not isinstance(m1, libsbml.Model) or not isinstance(m2, libsbml.Model):
raise Exception('Need to call with two libsbml.Model instances')
matches = []
for s1 in m1.species:
for s2 in m2.species:
            match = matchSpeciesChebi(s1, s2, logging=logging)
if match:
if len(match['exact']) or len(match['children']) or len(match['parents']):
matches.append(match)
return matches
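# Usage sketch (file names are placeholders; readSBMLFromFile is the standard
# libsbml reader):
#
#     import libsbml
#     m1 = libsbml.readSBMLFromFile('model1.xml').getModel()
#     m2 = libsbml.readSBMLFromFile('model2.xml').getModel()
#     printMatchingSpecies(getMatchingSpecies(m1, m2))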
def printMatchingSpecies(matches):
""" Prints the matches from :func:`getMatchingSpecies`-
:param matches: matches from getMatchingSpecies
:type matches: list
"""
for match in matches:
if len(match['exact']):
print('%s exactly matches %s' % (match['exact'][0]['id'], match['id']))
if len(match['parents']):
print('%s %s %s' % (match['parents'][0]['id'], match['parents'][0]['data']['type'], match['id']))
if len(match['children']):
print('%s %s %s' % (match['children'][0]['id'], match['children'][0]['data']['type'], match['id']))
def getMatchingReactions(modelOrList, idToMatch):
""" Returns a list of reactions that contains a reactant with the id to match.
:param modelOrList: SBML Model or list of reactions
:type modelOrList: libsbml.Model or list[libsbml.Reaction]
:param idToMatch: reaction id for matching
:type idToMatch: str
:return: list of reactions
:rtype: list
"""
if isinstance(modelOrList, libsbml.Model):
reactions = modelOrList.reactions
else:
reactions = modelOrList
matches = []
for r in reactions:
for reactant in r.reactants:
if reactant.getSpecies() == idToMatch:
matches.append(r)
for reactant in r.products:
if reactant.getSpecies() == idToMatch:
matches.append(r)
for modifier in r.modifiers:
if modifier.getSpecies() == idToMatch:
matches.append(r)
return matches | kirichoi/tellurium | tellurium/analysis/annotations.py | Python | apache-2.0 | 5,238 |
import os, gzip, cPickle
import numpy as np
from theano import tensor as T, shared
def get_batch(iBatch, data, szBatch=256):
return data[iBatch*szBatch:(iBatch+1)*szBatch]
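# Example (sketch): slice the first minibatch out of the raw training images
#
#     (train_x, train_y), _, _ = load()
#     batch0 = get_batch(0, train_x.get_value(borrow=True))  # shape (256, 784)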
def load(name='mnist.pkl.gz'):
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(name)
if data_dir == "" and not os.path.isfile(name):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
name
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
name = new_path
if (not os.path.isfile(name)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, name)
# Load the dataset
f = gzip.open(name, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
#train_set, valid_set, test_set format: tuple(input, target)
    #input is a numpy.ndarray of 2 dimensions (a matrix)
    #whose rows each correspond to an example. target is a
    #numpy.ndarray of 1 dimension (a vector) that has the same length as
    #the number of rows in the input. It gives the target
    #to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = shared(np.asarray(data_x, 'float32'),
borrow=borrow)
shared_y = shared(np.asarray(data_y, 'float32'),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval | pengsun/DeepLearningTutorials | mycode/dataset.py | Python | bsd-3-clause | 2,959 |
from leak import logger
def versions_split(version_str, type_applyer=int):
dots_count = version_str.count('.')
if dots_count == 0:
major, minor, patch = version_str, 0, 0
elif dots_count == 1:
major, minor = version_str.split('.')
patch = 0
elif dots_count == 2:
major, minor, patch = version_str.split('.')
else:
logger.debug(
'Incorrect version "{version}". '
'Move to bottom when sorting'.format(version=version_str))
major, minor, patch = 0, 0, 0
return list(map(type_applyer, (major, minor, patch)))
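# Examples (sketch):
#     versions_split('1.2.3')  # -> [1, 2, 3]
#     versions_split('1.2')    # -> [1, 2, 0]
#     versions_split('7')      # -> [7, 0, 0]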
| bmwant21/leak | leak/version_parser.py | Python | mit | 604 |
import os
from generic import obj
from simulation import Simulation,SimulationInput,SimulationAnalyzer
from vasp_input import VaspInput,generate_vasp_input
from vasp_analyzer import VaspAnalyzer,read_vxml
class Vasp(Simulation):
input_type = VaspInput
analyzer_type = VaspAnalyzer
generic_identifier = 'vasp'
application = 'vasp'
application_properties = set(['serial','mpi'])
application_results = set([])
allow_overlapping_files = True
vasp_save_files = 'INCAR KPOINTS POSCAR CONTCAR DOSCAR EIGENVAL IBZKPT OSZICAR OUTCAR PCDAT XDATCAR vasprun.xml'.split()
def set_files(self):
self.infile = 'INCAR'
self.outfile = self.identifier + self.outfile_extension
self.errfile = self.identifier + self.errfile_extension
#end def set_files
def check_result(self,result_name,sim):
return False
#end def check_result
def get_result(self,result_name,sim):
self.not_implemented()
#end def get_result
def incorporate_result(self,result_name,result,sim):
self.not_implemented()
#end def incorporate_result
def app_command(self):
return self.app_name
#end def app_command
def check_sim_status(self):
success = False
outpath = os.path.join(self.locdir,self.identifier+'.OUTCAR')
exists = os.path.exists(outpath)
if not exists:
outpath = os.path.join(self.locdir,'OUTCAR')
exists = os.path.exists(outpath)
#end if
if exists:
outcar = open(outpath,'r').read()
success = 'General timing and accounting' in outcar
#end if
self.finished = success
#end def check_sim_status
def get_output_files(self):
output_files = []
for file in self.vasp_save_files:
native_file = os.path.join(self.locdir,file)
save_file = os.path.join(self.locdir,self.identifier+'.'+file)
if os.path.exists(native_file):
os.system('cp {0} {1}'.format(native_file,save_file))
output_files.append(file)
#end if
#end for
return output_files
#end def get_output_files
#end class Vasp
def generate_vasp(**kwargs):
sim_args,inp_args = Simulation.separate_inputs(kwargs,copy_pseudos=False)
sim_args.input = generate_vasp_input(**inp_args)
vasp = Vasp(**sim_args)
return vasp
#end def generate_vasp
# VASP HT (hand template) classes and functions
class VaspHTAnalyzer(SimulationAnalyzer):
def __init__(self,arg0=None,xml=False,analyze=False):
self.info = obj(xml=xml)
prefix = None
if isinstance(arg0,Simulation):
sim = arg0
infile = sim.infile
path = sim.locdir
elif arg0!=None:
path,infile = os.path.split(arg0)
if infile=='':
infile = None
#end if
if infile!=None:
if not infile.endswith('INCAR'):
self.error('please provide the path to an INCAR file')
#end if
prefix = infile.replace('INCAR','').strip()
if prefix=='':
prefix=None
#end if
#end if
else:
self.info.xml = False
return
#end if
self.info.set(
path = path,
infile = infile,
prefix = prefix
)
if analyze:
self.analyze()
#end if
#end def __init__
def analyze(self):
if self.info.xml:
xmlfile = 'vasprun.xml'
if self.info.prefix!=None:
xmlfile = self.info.prefix+xmlfile
#end if
self.xmldata = read_vxml(os.path.join(self.info.path,xmlfile))
#end if
#end def analyze
#end class VaspHTAnalyzer
class VaspHT(Simulation):
input_type = SimulationInput
analyzer_type = VaspHTAnalyzer
generic_identifier = 'vasp_ht'
infile_extension = None
application = 'vasp'
application_properties = set(['serial','mpi'])
application_results = set([])
allow_overlapping_files = True
vasp_save_files = 'INCAR KPOINTS POSCAR CONTCAR DOSCAR EIGENVAL IBZKPT OSZICAR OUTCAR PCDAT XDATCAR vasprun.xml'.split()
all_inputs = 'INCAR KPOINTS POSCAR POTCAR'.split()
all_outputs = 'CHG CHGCAR CONTCAR DOSCAR EIGENVAL IBZKPT OSZICAR OUTCAR PCDAT PROCAR WAVECAR XDATCAR'.split()
def set_files(self):
self.infile = 'INCAR'
self.outfile = self.identifier + self.outfile_extension
self.errfile = self.identifier + self.errfile_extension
#end def set_files
def check_result(self,result_name,sim):
return False
#end def check_result
def get_result(self,result_name,sim):
self.not_implemented()
#end def get_result
def incorporate_result(self,result_name,result,sim):
self.not_implemented()
#end def incorporate_result
def app_command(self):
command_line_args = ''
return self.app_name + command_line_args
#end def app_command
def check_sim_status(self):
success = False
outpath = os.path.join(self.locdir,'OUTCAR')
if os.path.exists(outpath):
outcar = open(outpath,'r').read()
success = 'General timing and accounting' in outcar
#end if
self.finished = success
#end def check_sim_status
def get_output_files(self):
output_files = []
for file in self.vasp_save_files:
native_file = os.path.join(self.locdir,file)
save_file = os.path.join(self.locdir,self.identifier+'.'+file)
if os.path.exists(native_file):
os.system('cp {0} {1}'.format(native_file,save_file))
output_files.append(file)
#end if
#end for
return output_files
#end def get_output_files
#end class VaspHT
def generate_vasp_ht(**kwargs):
sim_args,inp_args = Simulation.separate_inputs(kwargs,copy_pseudos=False)
if not 'input' in sim_args:
VaspHT.class_error('input keyword is required','generate_vasp_ht')
#end if
vasp_ht = VaspHT(**sim_args)
return vasp_ht
#end def generate_vasp_ht
| habanero-rice/hclib | test/performance-regression/full-apps/qmcpack/nexus/library/vasp.py | Python | bsd-3-clause | 6,365 |
import logging, time, random
from autotest.client.shared import error
from qemu.tests import drive_mirror
class DriveMirrorSimple(drive_mirror.DriveMirror):
def __init__(self, test, params, env, tag):
super(DriveMirrorSimple, self).__init__(test, params, env, tag)
@error.context_aware
def query_status(self):
"""
        query running block mirroring job info;
"""
error.context("query job status", logging.info)
if not self.get_status():
raise error.TestFail("No active job")
def run_drive_mirror_simple(test, params, env):
"""
drive_mirror_simple test:
1). launch block mirroring job w/o max speed
    2). query job status on the device before steady status (optional)
    3). reset max job speed before steady status (optional)
    4). cancel active job on the device before steady status (optional)
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
tag = params.get("source_images", "image1")
repeats = int(params.get("repeat_times", 3))
simple_test = DriveMirrorSimple(test, params, env, tag)
try:
for i in range(repeats):
v_max,v_min = int(params.get("login_timeout", 360)) / 4, 0
time.sleep(random.randint(v_min, v_max))
simple_test.start()
simple_test.action_before_steady()
if simple_test.get_status():
simple_test.cancel()
finally:
simple_test.clean()
| sathnaga/virt-test | qemu/tests/drive_mirror_simple.py | Python | gpl-2.0 | 1,549 |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Harsha Thyagaraja <harshkid@linux.vnet.ibm.com>
#
# Based on code by Martin Bligh <mbligh@google.com>
# copyright 2006 Google, Inc.
# https://github.com/autotest/autotest-client-tests/tree/master/ltp
import os
from avocado import Test
from avocado import main
from avocado.utils import build
from avocado.utils import process, archive
from avocado.utils.software_manager import SoftwareManager
from avocado.utils.partition import Partition
class Ltp_Fs(Test):
'''
Using LTP (Linux Test Project) testsuite to run Filesystem related tests
'''
def setUp(self):
'''
To check and install dependencies for the test
'''
sm = SoftwareManager()
for package in ['gcc', 'make', 'automake', 'autoconf']:
if not sm.check_installed(package) and not sm.install(package):
self.error("%s is needed for the test to be run", package)
self.disk = self.params.get('disk', default=None)
self.mount_point = self.params.get('dir', default=self.srcdir)
self.script = self.params.get('script')
fstype = self.params.get('fs', default='ext4')
self.args = self.params.get('args', default='')
if self.disk is not None:
self.part_obj = Partition(self.disk, mountpoint=self.mount_point)
self.log.info("Unmounting the disk/dir if it is already mounted")
self.part_obj.unmount()
self.log.info("creating %s file system on %s", fstype, self.disk)
self.part_obj.mkfs(fstype)
self.log.info("mounting %s on %s", self.disk, self.mount_point)
self.part_obj.mount()
url = "https://github.com/linux-test-project/ltp/"
url += "archive/master.zip"
tarball = self.fetch_asset("ltp-master.zip",
locations=[url], expire='7d')
archive.extract(tarball, self.teststmpdir)
ltp_dir = os.path.join(self.teststmpdir, "ltp-master")
os.chdir(ltp_dir)
build.make(ltp_dir, extra_args='autotools')
self.ltpbin_dir = os.path.join(ltp_dir, 'bin')
if not os.path.isdir(self.ltpbin_dir):
os.mkdir(self.ltpbin_dir)
process.system('./configure --prefix=%s' % self.ltpbin_dir)
build.make(ltp_dir)
build.make(ltp_dir, extra_args='install')
def test_fs_run(self):
'''
Downloads LTP, compiles, installs and runs filesystem
tests on a user specified disk
'''
if self.script == 'runltp':
logfile = os.path.join(self.logdir, 'ltp.log')
failcmdfile = os.path.join(self.logdir, 'failcmdfile')
self.args += (" -q -p -l %s -C %s -d %s"
% (logfile, failcmdfile, self.mount_point))
self.log.info("Args = %s", self.args)
cmd = '%s %s' % (os.path.join(self.ltpbin_dir, self.script),
self.args)
result = process.run(cmd, ignore_status=True)
# Walk the stdout and try detect failed tests from lines
# like these:
# aio01 5 TPASS : Test 5: 10 reads and
# writes in 0.000022 sec
# vhangup02 1 TFAIL : vhangup02.c:88:
# vhangup() failed, errno:1
        # and check for fail_status. The first part contains the test name
fail_status = ['TFAIL', 'TBROK', 'TWARN']
split_lines = (line.split(None, 3)
for line in result.stdout.splitlines())
failed_tests = [items[0] for items in split_lines
if len(items) == 4 and items[2] in fail_status]
if failed_tests:
self.fail("LTP tests failed: %s" % ", ".join(failed_tests))
elif result.exit_status != 0:
self.fail("No test failures detected, but LTP finished with %s"
% (result.exit_status))
def tearDown(self):
'''
Cleanup of disk used to perform this test
'''
self.log.info("Removing the filesystem created on %s", self.disk)
delete_fs = "dd if=/dev/zero bs=512 count=512 of=%s" % self.disk
if process.system(delete_fs, shell=True, ignore_status=True):
self.fail("Failed to delete filesystem on %s", self.disk)
if self.disk is not None:
self.log.info("Unmounting disk %s on directory %s",
self.disk, self.mount_point)
self.part_obj.unmount()
if __name__ == "__main__":
main()
| apahim/avocado-misc-tests | io/disk/ltp_fs.py | Python | gpl-2.0 | 5,087 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import fixtures
from lxml import etree
from oslo_config import cfg
import requests
import testtools
from testtools import content as test_content
from testtools import matchers
import urllib.parse as urlparse
from os_collect_config import cfn
from os_collect_config import collect
from os_collect_config import exc
META_DATA = {u'int1': 1,
u'strfoo': u'foo',
u'map_ab': {
u'a': 'apple',
u'b': 'banana',
}}
SOFTWARE_CONFIG_DATA = {
u'old-style': u'value',
u'deployments': [
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'Heat::Ungrouped',
u'name': 'dep-name1',
u'outputs': None,
u'options': None,
u'config': {
u'config1': 'value1'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'group': 'os-apply-config',
u'name': 'dep-name2',
u'outputs': None,
u'options': None,
u'config': {
u'config2': 'value2'
}
},
{
u'inputs': [
{
u'type': u'String',
u'name': u'input1',
u'value': u'value1'
}
],
u'name': 'dep-name3',
u'outputs': None,
u'options': None,
u'config': {
u'config3': 'value3'
}
},
{
u'inputs': [],
u'group': 'ignore_me',
u'name': 'ignore_me_name',
u'outputs': None,
u'options': None,
u'config': 'ignore_me_config'
}
]
}
SOFTWARE_CONFIG_IMPOSTER_DATA = {
u'old-style': u'value',
u'deployments': {
u"not": u"a list"
}
}
class FakeResponse(dict):
def __init__(self, text):
self.text = text
def raise_for_status(self):
pass
class FakeReqSession(object):
SESSION_META_DATA = META_DATA
def __init__(self, testcase, expected_netloc):
self._test = testcase
self._expected_netloc = expected_netloc
self.verify = False
def get(self, url, params, headers, verify=None, timeout=None):
self._test.addDetail('url', test_content.text_content(url))
url = urlparse.urlparse(url)
self._test.assertEqual(self._expected_netloc, url.netloc)
self._test.assertEqual('/v1/', url.path)
self._test.assertEqual('application/json',
headers['Content-Type'])
self._test.assertIn('SignatureVersion', params)
self._test.assertEqual('2', params['SignatureVersion'])
self._test.assertIn('Signature', params)
self._test.assertIn('Action', params)
self._test.assertEqual('DescribeStackResource',
params['Action'])
self._test.assertIn('LogicalResourceId', params)
self._test.assertEqual('foo', params['LogicalResourceId'])
self._test.assertEqual(10, timeout)
root = etree.Element('DescribeStackResourceResponse')
result = etree.SubElement(root, 'DescribeStackResourceResult')
detail = etree.SubElement(result, 'StackResourceDetail')
metadata = etree.SubElement(detail, 'Metadata')
metadata.text = json.dumps(self.SESSION_META_DATA)
if verify is not None:
self.verify = True
return FakeResponse(etree.tostring(root))
class FakeRequests(object):
exceptions = requests.exceptions
def __init__(self, testcase, expected_netloc='192.0.2.1:8000'):
self._test = testcase
self._expected_netloc = expected_netloc
def Session(self):
return FakeReqSession(self._test, self._expected_netloc)
class FakeReqSessionSoftwareConfig(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_DATA
class FakeRequestsSoftwareConfig(FakeRequests):
FAKE_SESSION = FakeReqSessionSoftwareConfig
def Session(self):
return self.FAKE_SESSION(self._test, self._expected_netloc)
class FakeReqSessionConfigImposter(FakeReqSession):
SESSION_META_DATA = SOFTWARE_CONFIG_IMPOSTER_DATA
class FakeRequestsConfigImposter(FakeRequestsSoftwareConfig):
FAKE_SESSION = FakeReqSessionConfigImposter
class FakeFailRequests(object):
exceptions = requests.exceptions
class Session(object):
def get(self, url, params, headers, verify=None, timeout=None):
raise requests.exceptions.HTTPError(403, 'Forbidden')
class TestCfnBase(testtools.TestCase):
def setUp(self):
super(TestCfnBase, self).setUp()
self.log = self.useFixture(fixtures.FakeLogger())
self.useFixture(fixtures.NestedTempfile())
self.hint_file = tempfile.NamedTemporaryFile()
self.hint_file.write(u'http://192.0.2.1:8000'.encode('utf-8'))
self.hint_file.flush()
self.addCleanup(self.hint_file.close)
collect.setup_conf()
cfg.CONF.cfn.heat_metadata_hint = self.hint_file.name
cfg.CONF.cfn.metadata_url = None
cfg.CONF.cfn.path = ['foo.Metadata']
cfg.CONF.cfn.access_key_id = '0123456789ABCDEF'
cfg.CONF.cfn.secret_access_key = 'FEDCBA9876543210'
class TestCfn(TestCfnBase):
def test_collect_cfn(self):
cfn_md = cfn.Collector(requests_impl=FakeRequests(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_md = cfn_md[0][1]
for k in ('int1', 'strfoo', 'map_ab'):
self.assertIn(k, cfn_md)
self.assertEqual(cfn_md[k], META_DATA[k])
self.assertEqual('', self.log.output)
def test_collect_with_ca_cert(self):
cfn.CONF.cfn.ca_certificate = "foo"
collector = cfn.Collector(requests_impl=FakeRequests(self))
collector.collect()
self.assertTrue(collector._session.verify)
def test_collect_cfn_fail(self):
cfn_collect = cfn.Collector(requests_impl=FakeFailRequests)
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Forbidden', self.log.output)
def test_collect_cfn_no_path(self):
cfg.CONF.cfn.path = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No path configured', self.log.output)
def test_collect_cfn_bad_path(self):
cfg.CONF.cfn.path = ['foo']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('Path not in format', self.log.output)
def test_collect_cfn_no_metadata_url(self):
cfg.CONF.cfn.heat_metadata_hint = None
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)
self.assertIn('No metadata_url configured', self.log.output)
def test_collect_cfn_missing_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.not_there']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)
self.assertIn('Sub-key not_there does not exist', self.log.output)
def test_collect_cfn_sub_path(self):
cfg.CONF.cfn.path = ['foo.Metadata.map_ab']
cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))
content = cfn_collect.collect()
self.assertThat(content, matchers.IsInstance(list))
self.assertEqual('cfn', content[0][0])
content = content[0][1]
self.assertIn(u'b', content)
self.assertEqual(u'banana', content[u'b'])
def test_collect_cfn_metadata_url_overrides_hint(self):
cfg.CONF.cfn.metadata_url = 'http://127.0.1.1:8000/v1/'
cfn_collect = cfn.Collector(
requests_impl=FakeRequests(self,
expected_netloc='127.0.1.1:8000'))
cfn_collect.collect()
class TestCfnSoftwareConfig(TestCfnBase):
def test_collect_cfn_software_config(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsSoftwareConfig(self)).collect()
self.assertThat(cfn_md, matchers.IsInstance(list))
self.assertEqual('cfn', cfn_md[0][0])
cfn_config = cfn_md[0][1]
self.assertThat(cfn_config, matchers.IsInstance(dict))
self.assertEqual(set(['old-style', 'deployments']),
set(cfn_config.keys()))
self.assertIn('deployments', cfn_config)
self.assertThat(cfn_config['deployments'], matchers.IsInstance(list))
self.assertEqual(4, len(cfn_config['deployments']))
deployment = cfn_config['deployments'][0]
self.assertIn('inputs', deployment)
self.assertThat(deployment['inputs'], matchers.IsInstance(list))
self.assertEqual(1, len(deployment['inputs']))
self.assertEqual('dep-name1', cfn_md[1][0])
self.assertEqual('value1', cfn_md[1][1]['config1'])
self.assertEqual('dep-name2', cfn_md[2][0])
self.assertEqual('value2', cfn_md[2][1]['config2'])
def test_collect_cfn_deployments_not_list(self):
cfn_md = cfn.Collector(
requests_impl=FakeRequestsConfigImposter(self)).collect()
self.assertEqual(1, len(cfn_md))
self.assertEqual('cfn', cfn_md[0][0])
self.assertIn('not', cfn_md[0][1]['deployments'])
self.assertEqual('a list', cfn_md[0][1]['deployments']['not'])
| openstack/os-collect-config | os_collect_config/tests/test_cfn.py | Python | apache-2.0 | 10,601 |
"""
Django settings for django_magic_login project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8ow#z)z$lyvv@hlitmcyhfr&cclv1(@$!b2bk6ep0&$3whhfzq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_magic_login.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_magic_login.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Magic Login
LOGIN_URL = '/customers/login/'
MIDDLEWARE += ['sesame.middleware.AuthenticationMiddleware']
AUTHENTICATION_BACKENDS = ['sesame.backends.ModelBackend']
SESAME_TOKEN_NAME = "url_auth_token"
SESAME_MAX_AGE = 6 * 60 * 60  # 6 hours
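# Example (added, illustrative, not part of this project): with django-sesame
# configured as above, a one-time login link can be built and mailed roughly
# like this from a view or task (get_query_string is django-sesame's helper):
#   from django.core.mail import send_mail
#   from sesame.utils import get_query_string
#   link = "https://example.com" + LOGIN_URL + get_query_string(user)
#   send_mail("Your login link", link, DEFAULT_FROM_EMAIL, [user.email])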
EMAIL_HOST = ""
EMAIL_PORT = 2587
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Admin<user@domain.com>"
| greglinch/sourcelist | django-magic-link/django_magic_login/settings.py | Python | mit | 3,560
"""distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id: util.py 52231 2006-10-08 17:41:25Z ronald.oussoren $"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
For non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
f.close()
if m is not None:
macver = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = 'macosx'
platver = os.uname()[2]
osmajor = int(platver.split('.')[0])
if osmajor >= 8 and \
get_config_vars().get('UNIVERSALSDK', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return apply(os.path.join, paths)
# convert_path ()
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
if os.name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os.name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
elif os.name == 'os2':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
elif os.name == 'mac':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
# Chop off volume name from start of path
elements = string.split(pathname, ":", 1)
pathname = ":" + elements[1]
return os.path.join(new_root, pathname)
else:
raise DistutilsPlatformError, \
"nothing known about platform '%s'" % os.name
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and not os.environ.has_key('HOME'):
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if not os.environ.has_key('PLAT'):
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and
    the variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if local_vars.has_key(var_name):
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError, var:
raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
def grok_environment_error (exc, prefix="error: "):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
such as 'rename()' or 'link()'. Returns the error message as a string
prefixed with 'prefix'.
"""
# check for Python 1.5.2-style {IO,OS}Error exception objects
if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
if exc.filename:
error = prefix + "%s: %s" % (exc.filename, exc.strerror)
else:
# two-argument functions in posix module don't
# include the filename in the exception object!
error = prefix + "%s" % exc.strerror
else:
error = prefix + str(exc[-1])
return error
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _wordchars_re, _squote_re, _dquote_re
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = string.strip(s)
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = string.lstrip(s[end:])
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError, \
"this can't happen (bad char '%c')" % s[end]
if m is None:
raise ValueError, \
"bad string (mismatched %s quotes?)" % s[end]
(beg, end) = m.span()
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
# split_quoted ()
def execute (func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
apply(func, args)
def strtobool (val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = string.lower(val)
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError, "invalid truth value %r" % (val,)
def byte_compile (py_files,
optimize=0, force=0,
prefix=None, base_dir=None,
verbose=1, dry_run=0,
direct=None):
"""Byte-compile a collection of Python source files to either .pyc
or .pyo files in the same directory. 'py_files' is a list of files
to compile; any files that don't end in ".py" are silently skipped.
'optimize' must be one of the following:
0 - don't optimize (generate .pyc)
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
'basedir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
'byte_compile()' figure out to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
# First, if the caller didn't force us into direct or indirect mode,
# figure out which mode we should be in. We take a conservative
# approach: choose direct mode *only* if the current interpreter is
# in debug mode and optimize is 0. If we're not in debug mode (-O
# or -OO), we don't know which level of optimization this
# interpreter is running with, so we can't do direct
# byte-compilation and be certain that it's the right thing. Thus,
# always compile indirectly if the current interpreter is in either
# optimize mode, or if either optimization level was requested by
# the caller.
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
try:
from tempfile import mkstemp
(script_fd, script_name) = mkstemp(".py")
except ImportError:
from tempfile import mktemp
(script_fd, script_name) = None, mktemp(".py")
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, "w")
else:
script = open(script_name, "w")
script.write("""\
from distutils.util import byte_compile
files = [
""")
# XXX would be nice to write absolute filenames, just for
# safety's sake (script should be more robust in the face of
# chdir'ing before running it). But this requires abspath'ing
# 'prefix' as well, and that breaks the hack in build_lib's
# 'byte_compile()' method that carefully tacks on a trailing
# slash (os.sep really) to make sure the prefix here is "just
# right". This whole prefix business is rather delicate -- the
# problem is that it's really a directory, but I'm treating it
# as a dumb string, so trailing slashes and so forth matter.
#py_files = map(os.path.abspath, py_files)
#if prefix:
# prefix = os.path.abspath(prefix)
script.write(string.join(map(repr, py_files), ",\n") + "]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
prefix=%r, base_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
dry_run=dry_run)
# "Direct" byte-compilation: use the py_compile module to compile
# right here, right now. Note that the script generated in indirect
# mode simply calls 'byte_compile()' in direct mode, a weird sort of
# cross-process recursion. Hey, it works!
else:
from py_compile import compile
for file in py_files:
if file[-3:] != ".py":
# This lets us be lazy and not filter filenames in
# the "install_lib" command.
continue
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
cfile = file + (__debug__ and "c" or "o")
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError, \
("invalid prefix: filename %r doesn't start with %r"
% (file, prefix))
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info("byte-compiling %s to %s", file, cfile_base)
if not dry_run:
compile(file, cfile, dfile)
else:
log.debug("skipping byte-compilation of %s to %s",
file, cfile_base)
# byte_compile ()
def rfc822_escape (header):
"""Return a version of the string escaped for inclusion in an
    RFC-822 header, by ensuring there are 8 spaces after each newline.
"""
lines = string.split(header, '\n')
lines = map(string.strip, lines)
header = string.join(lines, '\n' + 8*' ')
return header
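# Quick illustration (added example, not part of distutils): shell-style word
# splitting and truth-value parsing as implemented above.
if __name__ == '__main__':
    print split_quoted('this is "a test"')        # ['this', 'is', 'a test']
    print split_quoted("one\\ word 'two words'")  # ['one word', 'two words']
    print strtobool('Yes'), strtobool('off')      # 1 0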
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/distutils/util.py | Python | gpl-2.0 | 19,657 |
from theano import tensor as T, function, shared, config
import numpy as np
class Optimizer(object):
def update(self, param, grad, lr, transformer=None, grad_transformer=None):
if grad_transformer is not None:
grad = grad_transformer.transform(grad)
param_update, helper_update = self.update_(param, grad, lr)
if transformer is not None:
param, new_param = param_update
new_param = transformer.transform(new_param)
param_update = param, new_param
return [param_update] if helper_update is None else [param_update, helper_update]
def update_(self, param, grad, lr):
raise NotImplementedError
class SGD(Optimizer):
def update_(self, param, grad, lr):
r"""
.. math:: \theta := \theta - lr * \nabla_{\theta} J
"""
return (param, param - lr * grad), None
class Adagrad(Optimizer):
def update_(self, param, grad, lr):
r"""
.. math::
A := A + (\nabla_\theta J)^2
\theta := \theta - lr * \frac{\nabla_\theta J}{\sqrt A}
"""
helper = shared(value=np.zeros(param.get_value().shape, dtype=config.floatX))
new_helper = helper + grad**2
return (param, param - lr * grad / T.sqrt(1e-8 + new_helper)), (helper, new_helper)
class RMSProp(Optimizer):
def __init__(self, alpha=0.9, beta=0.1):
super(RMSProp, self).__init__()
self.alpha, self.beta = alpha, beta
def update_(self, param, grad, lr):
r"""
.. math::
A := \alpha A + \beta (\nabla_\theta J)^2
\theta := \theta - lr * \frac{\nabla_\theta J}{\sqrt A}
:param param: parameter to be updated
:param grad: gradient
:param lr: learning rate
:param alpha: cache mixing portion for the previous cache
:param beta: cache mixing portion for the new gradient
:return: updates for rmsprop
"""
helper = shared(value=np.zeros(param.get_value().shape, dtype=config.floatX))
new_helper = self.alpha * helper + self.beta * grad**2
return (param, param - lr * grad / T.sqrt(1e-8 + new_helper)), (helper, new_helper)
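# Minimal sketch (added example, not part of the original module): an optimizer's
# update list plugs straight into theano.function; the names below are only for
# this demo and assume theano/numpy are available.
if __name__ == '__main__':
    W = shared(np.random.randn(3, 2).astype(config.floatX), name='W')
    x = T.matrix('x')
    loss = T.sum(T.dot(x, W) ** 2)
    updates = RMSProp().update(W, T.grad(loss, W), lr=0.01)
    train_step = function([x], loss, updates=updates)
    print(train_step(np.random.randn(4, 3).astype(config.floatX)))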
| vzhong/pystacks | pystacks/optimizer.py | Python | mit | 2,213 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1011090002.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
    sbml = libsbml.readSBMLFromString(sbmlString)
| biomodels/MODEL1011090002 | MODEL1011090002/model.py | Python | cc0-1.0 | 427
# __init__.py Common functions for uasyncio primitives
# Copyright (c) 2018-2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
try:
import uasyncio as asyncio
except ImportError:
import asyncio
async def _g():
pass
type_coro = type(_g())
# If a callback is passed, run it and return.
# If a coro is passed, initiate it and return.
# coros are passed by name i.e. not using function call syntax.
def launch(func, tup_args):
res = func(*tup_args)
if isinstance(res, type_coro):
res = asyncio.create_task(res)
return res
def set_global_exception():
def _handle_exception(loop, context):
import sys
sys.print_exception(context["exception"])
sys.exit()
loop = asyncio.get_event_loop()
loop.set_exception_handler(_handle_exception)
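# Minimal demo (added example, not part of the original file): launch() accepts
# either a plain callback or a coroutine function, both passed by name.
if __name__ == '__main__':
    async def _demo():
        def cb(v):
            print('callback ran with', v)
        async def coro(v):
            print('coro ran with', v)
        launch(cb, (1,))    # plain function: called immediately
        launch(coro, (2,))  # coroutine: wrapped in a task
        await asyncio.sleep(0)
    asyncio.run(_demo())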
| peterhinch/micropython-async | v3/as_drivers/sched/primitives/__init__.py | Python | mit | 823 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64encode
from datetime import datetime as dt
from hashlib import sha256
from json import dumps, loads
from requests import get, post
from time import mktime, time
from quantconnect.Result import Result
DOWNLOAD_CHUNK_SIZE = 256 * 1024
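# Example (added, illustrative): typical usage once the package is installed;
# the user id and token below are placeholders, not real credentials.
#   from quantconnect.api import Api
#   api = Api(userId=12345, token='your-api-token')
#   if api.connected():
#       print(api.list_projects())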
class Api:
'''QuantConnect.com Interaction Via API.
Args:
userId(int/str): User Id number found at www.quantconnect.com/account.
token(str): Access token found at www.quantconnect.com/account.
debug(boolean): True to enable debugging messages'''
def __init__(self, userId, token, debug = False):
'''Creates a new instance of Api'''
self.__url = 'https://www.quantconnect.com/api/v2/'
self.__userId = userId
self.__token = token
self.__debug = debug
def Execute(self, endpoint, data = None, is_post = False, headers = {}):
'''Execute an authenticated request to the QuantConnect API
Args:
endpoint(str): Request end point.
data(dict): Request values
is_post(boolean): True if POST request, GET request otherwise
headers(dict): Additional headers'''
url = self.__url + endpoint
# Create authenticated timestamped token.
timestamp = str(int(time()))
# Attach timestamp to token for increasing token randomness
timeStampedToken = f'{self.__token}:{timestamp}'
# Hash token for transport
apiToken = sha256(timeStampedToken.encode('utf-8')).hexdigest()
# Attach in headers for basic authentication.
authentication = f'{self.__userId}:{apiToken}'
basic = b64encode(authentication.encode('utf-8')).decode('ascii')
headers.update({ 'Authorization': f'Basic {basic}', 'Timestamp': timestamp })
if is_post:
response = post(url = url, data = data, headers = headers)
else: # Encode the request in parameters of URL.
response = get(url = url, params = data, headers = headers)
if self.__debug:
print(url)
self.__pretty_print(response)
# Convert to object for parsing.
try:
result = response.json()
except:
result = {
'success': False,
'messages': [
'API returned a result which cannot be parsed into JSON. Please inspect the raw result below:',
response.text
]}
if not result['success']:
message = ''
for name, value in result.items():
if isinstance(value, str):
message += f'{name}: {value} '
if isinstance(value, list):
message += f'{name}: {", ".join(value)} '
print(f'There was an exception processing your request: {message}')
return result
def connected(self):
'''Check whether Api is successfully connected with correct credentials'''
return self.Execute('authenticate')['success']
def list_projects(self):
'''Read back a list of all projects on the account for a user.
Returns:
            Dictionary that contains the list of projects.
'''
return self.Execute('projects/read')
def create_project(self, name, language):
'''Create a project with the specified name and language via QuantConnect.com API
Args:
name(str): Project name
language(str): Programming language to use (Language must be C#, F# or Py).
Returns:
Dictionary that includes information about the newly created project.
'''
return self.Execute('projects/create',
{
'name': name,
'language': language
}, True)
def read_project(self, projectId):
'''Read in a project from the QuantConnect.com API.
Args:
projectId(int): Project id you own
Returns:
Dictionary that includes information about a specific project
'''
return self.Execute('projects/read', { 'projectId': projectId })
def add_project_file(self, projectId, name, content):
'''Add a file to a project.
Args:
projectId(int): The project to which the file should be added.
name(str): The name of the new file.
content(str): The content of the new file.
Returns:
            Dictionary that includes information about the newly created file
'''
return self.Execute('files/create',
{
'projectId' : projectId,
'name' : name,
'content' : content
}, True)
def update_project_filename(self, projectId, oldFileName, newFileName):
'''Update the name of a file
Args:
projectId(int): Project id to which the file belongs
oldFileName(str): The current name of the file
newFileName(str): The new name for the file
Returns:
Dictionary indicating success
'''
return self.Execute('files/update',
{
'projectId' : projectId,
'name': oldFileName,
'newName': newFileName
}, True)
def update_project_file_content(self, projectId, fileName, newFileContents):
'''Update the contents of a file
Args:
projectId(int): Project id to which the file belongs
fileName(str): The name of the file that should be updated
newFileContents(str): The new contents of the file
Returns:
Dictionary indicating success
'''
return self.Execute('files/update',
{
'projectId': projectId,
'name': fileName,
'content': newFileContents
}, True)
def read_project_files(self, projectId):
'''Read all files in a project
Args:
projectId(int): Project id to which the file belongs
Returns:
Dictionary that includes the information about all files in the project
'''
return self.Execute('files/read', { 'projectId': projectId })
def read_project_file(self, projectId, fileName):
'''Read a file in a project
Args:
projectId(int): Project id to which the file belongs
fileName(str): The name of the file
Returns:
Dictionary that includes the file information
'''
return self.Execute('files/read',
{
'projectId': projectId,
'name': fileName
})
def delete_project_file(self, projectId, name):
'''Delete a file in a project
Args:
projectId(int): Project id to which the file belongs
name(str): The name of the file that should be deleted
Returns:
Dictionary indicating success
'''
return self.Execute('files/delete',
{
'projectId' : projectId,
'name' : name
}, True)
def delete_project(self, projectId):
'''Delete a specific project owned by the user from QuantConnect.com
Args:
projectId(int): Project id we own and wish to delete
Returns:
Dictionary indicating success
'''
return self.Execute('projects/delete', { 'projectId' : projectId }, True)
def create_compile(self, projectId):
'''Create a new compile job request for this project id.
Args:
projectId(int): Project id we wish to compile.
Returns:
Dictionary that includes the compile information
'''
return self.Execute('compile/create', { 'projectId' : projectId }, True)
def read_compile(self, projectId, compileId):
'''Read a compile packet job result.
Args:
projectId(int): Project id we sent for compile
compileId(str): Compile id return from the creation request
Returns:
Dictionary that includes the compile information
'''
return self.Execute('compile/read',
{
'projectId' : projectId,
'compileId': compileId
})
def list_backtests(self, projectId):
'''Get a list of backtests for a specific project id
Args:
projectId(int): Project id we'd like to get a list of backtest for
Returns:
Dictionary that includes the list of backtest
'''
return self.Execute('backtests/read', { 'projectId': projectId })
def create_backtest(self, projectId, compileId, backtestName):
'''Create a new backtest from a specified projectId and compileId
Args:
projectId(int): Id for the project to backtest
compileId(str): Compile id return from the creation request
backtestName(str): Name for the new backtest
Returns:
Dictionary that includes the backtest information
'''
return self.Execute('backtests/create',
{
'projectId' : projectId,
'compileId': compileId,
'backtestName': backtestName
}, True)
def read_backtest(self, projectId, backtestId, json_format = True):
'''Read out the full result of a specific backtest.
Args:
projectId(int): Project id for the backtest we'd like to read
backtestId(str): Backtest id for the backtest we'd like to read
            json_format(boolean): True to return the raw JSON dictionary, False to wrap it in a Result object
Returns:
dictionary that includes the backtest information or Result object
'''
json = self.Execute('backtests/read',
{
'projectId' : projectId,
'backtestId': backtestId
})
return json if json_format else Result(json)
def read_backtest_report(self, projectId, backtestId, save=False):
'''Read out the report of a backtest in the project id specified.
Args:
projectId(int): Project id to read.
backtestId(str): Specific backtest id to read.
save(boolean): True if data should be saved to disk
Returns:
Dictionary that contains the backtest report
'''
json = self.Execute('backtests/read/report',
{
'projectId': projectId,
'backtestId': backtestId,
}, True)
if save and json['success']:
with open(backtestId + '.html', "w") as fp:
fp.write(json['report'])
print(f'Log saved as {backtestId}.html')
return json
def update_backtest(self, projectId, backtestId, backtestName = '', backtestNote = ''):
'''Update the backtest name.
Args:
projectId(str): Project id to update
backtestId(str): Specific backtest id to read
backtestName(str): New backtest name to set
note(str): Note attached to the backtest
Returns:
Dictionary indicating success
'''
return self.Execute('backtests/update',
{
'projectId' : projectId,
'backtestId': backtestId,
'name': backtestName,
'note': backtestNote
}, True)
def delete_backtest(self, projectId, backtestId):
'''Delete a backtest from the specified project and backtestId.
Args:
projectId(int): Project for the backtest we want to delete
backtestId(str): Backtest id we want to delete
Returns:
Dictionary indicating success
'''
return self.Execute('backtests/delete',
{
'projectId': projectId,
'backtestId': backtestId
})
def list_live_algorithms(self, status, startTime=None, endTime=None):
'''Get a list of live running algorithms for a logged in user.
Args:
status(str): Filter the statuses of the algorithms returned from the api
Only the following statuses are supported by the Api:
"Liquidated", "Running", "RuntimeError", "Stopped",
startTime(datetime): Earliest launched time of the algorithms returned by the Api
endTime(datetime): Latest launched time of the algorithms returned by the Api
Returns:
Dictionary that includes the list of live algorithms
'''
if (status != None and
status != "Running" and
status != "RuntimeError" and
status != "Stopped" and
status != "Liquidated"):
raise ValueError(
"The Api only supports Algorithm Statuses of Running, Stopped, RuntimeError and Liquidated")
if endTime == None:
endTime = dt.utcnow()
return self.Execute('live/read',
{
'status': str(status),
'end': mktime(endTime.timetuple()),
'start': 0 if startTime == None else mktime(startTime.timetuple())
})
def create_live_algorithm(self, projectId, compileId, serverType, baseLiveAlgorithmSettings, versionId="-1"):
'''Create a new live algorithm for a logged in user.
Args:
projectId(int): Id of the project on QuantConnect
compileId(str): Id of the compilation on QuantConnect
serverType(str): Type of server instance that will run the algorithm
baseLiveAlgorithmSettings(BaseLiveAlgorithmSettings): Brokerage specific
versionId(str): The version of the Lean used to run the algorithm.
-1 is master, however, sometimes this can create problems with live deployments.
                If you experience problems, try specifying the version of Lean you would like to use.
Returns:
Dictionary that contains information regarding the new algorithm
'''
return self.Execute('live/create',
{
'projectId': projectId,
'compileId': compileId,
'versionId': versionId,
'serverType': serverType,
'brokerage': baseLiveAlgorithmSettings
},
True,
headers = {"Accept": "application/json"})
def read_live_algorithm(self, projectId, deployId = None, json_format = True):
'''Read out a live algorithm in the project id specified.
Args:
projectId(int): Project id to read
deployId: Specific instance id to read
Returns:
Dictionary that contains information regarding the live algorithm or Result object
'''
json = self.Execute('live/read',
{
'projectId': projectId,
'deployId': deployId
})
return json if json_format else Result(json)
def liquidate_live_algorithm(self, projectId):
'''Liquidate a live algorithm from the specified project.
Args:
projectId(int): Project for the live instance we want to liquidate
Returns:
Dictionary indicating success
'''
return self.Execute('live/update/liquidate', { 'projectId': projectId }, True)
def stop_live_algorithm(self, projectId):
'''Stop a live algorithm from the specified project.
Args:
projectId(int): Project for the live instance we want to stop.
Returns:
Dictionary indicating success
'''
return self.Execute('live/update/stop', { 'projectId': projectId }, True)
def read_live_logs(self, projectId, algorithmId, startTime=None, endTime=None, save=False):
'''Gets the logs of a specific live algorithm.
Args:
projectId(int): Project Id of the live running algorithm
algorithmId(str): Algorithm Id of the live running algorithm
startTime(datetime): No logs will be returned before this time. Should be in UTC
endTime(datetime): No logs will be returned after this time. Should be in UTC
save(boolean): True if data should be saved to disk
Returns:
List of strings that represent the logs of the algorithm
'''
if endTime == None:
endTime = dt.utcnow()
json = self.Execute('live/read/log',
{
'format': 'json',
'projectId': projectId,
'algorithmId': algorithmId,
'end': mktime(endTime.timetuple()),
'start': 0 if startTime == None else mktime(startTime.timetuple())
})
if save and json['success']:
with open(algorithmId + '.txt', "w") as fp:
fp.write('\n'.join(json['LiveLogs']))
print(f'Log saved as {algorithmId}.txt')
return json
def read_data_link(self, symbol, securityType, market, resolution, date):
'''Gets the link to the downloadable data.
Args:
symbol(str): Symbol of security of which data will be requested
securityType(str): Type of underlying asset
market(str): e.g. CBOE, CBOT, FXCM, GDAX etc.
resolution(str): Resolution of data requested
date: Date of the data requested
Returns:
Dictionary that contains the link to the downloadable data.
'''
return self.Execute('data/read',
{
'format': 'link',
'ticker': symbol.lower(),
'type': securityType.lower(),
'market': market.lower(),
'resolution': resolution.lower(),
'date': date.strftime("%Y%m%d")
})
def download_data(self, symbol, securityType, market, resolution, date, fileName):
'''Method to download and save the data purchased through QuantConnect
Args:
symbol(str): Symbol of security of which data will be requested.
securityType(str): Type of underlying asset
market(str): e.g. CBOE, CBOT, FXCM, GDAX etc.
resolution(str): Resolution of data requested.
date(datetime): Date of the data requested.
fileName(str): file name of data download
Returns:
Boolean indicating whether the data was successfully downloaded or not
'''
# Get a link to the data
link = self.read_data_link(symbol, securityType, market, resolution, date)
# Make sure the link was successfully retrieved
if not link['success']:
return False
# download and save the data
with open(fileName + '.zip', "wb") as code:
request = get(link['link'], stream=True)
for chunk in request.iter_content(DOWNLOAD_CHUNK_SIZE):
code.write(chunk)
return True
def __pretty_print(self, result):
'''Print out a nice formatted version of the request'''
print ('')
try:
parsed = loads(result.text)
print (dumps(parsed, indent=4, sort_keys=True))
except Exception as err:
print ('Fall back error (text print)')
print ('')
print (result.text)
print ('')
print (err)
            print ('')
| jameschch/Lean | PythonToolbox/quantconnect/api.py | Python | apache-2.0 | 20,315
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for generating wrapper functions for PPAPI methods.
"""
from datetime import datetime
import os
import sys
from idl_c_proto import CGen
from idl_generator import Generator
from idl_log import ErrOut, InfoOut, WarnOut
from idl_outfile import IDLOutFile
class PPKind(object):
@staticmethod
def ChoosePPFunc(iface, ppb_func, ppp_func):
name = iface.node.GetName()
if name.startswith("PPP"):
return ppp_func
elif name.startswith("PPB"):
return ppb_func
else:
raise Exception('Unknown PPKind for ' + name)
class Interface(object):
"""Tracks information about a particular interface version.
- struct_name: the struct type used by the ppapi headers to hold the
method pointers (the vtable).
- needs_wrapping: True if a method in the interface needs wrapping.
- header_file: the name of the header file that defined this interface.
"""
def __init__(self, interface_node, release, version,
struct_name, needs_wrapping, header_file):
self.node = interface_node
self.release = release
self.version = version
self.struct_name = struct_name
# We may want finer grained filtering (method level), but it is not
# yet clear how to actually do that.
self.needs_wrapping = needs_wrapping
self.header_file = header_file
class WrapperGen(Generator):
"""WrapperGen - An abstract class that generates wrappers for PPAPI methods.
This generates a wrapper PPB and PPP GetInterface, which directs users
to wrapper PPAPI methods. Wrapper PPAPI methods may perform arbitrary
work before invoking the real PPAPI method (supplied by the original
GetInterface functions).
Subclasses must implement GenerateWrapperForPPBMethod (and PPP).
Optionally, subclasses can implement InterfaceNeedsWrapper to
filter out interfaces that do not actually need wrappers (those
interfaces can jump directly to the original interface functions).
"""
def __init__(self, wrapper_prefix, s1, s2, s3):
Generator.__init__(self, s1, s2, s3)
self.wrapper_prefix = wrapper_prefix
self._skip_opt = False
self.output_file = None
self.cgen = CGen()
def SetOutputFile(self, fname):
self.output_file = fname
def GenerateRelease(self, ast, release, options):
return self.GenerateRange(ast, [release], options)
@staticmethod
def GetHeaderName(name):
"""Get the corresponding ppapi .h file from each IDL filename.
"""
name = os.path.splitext(name)[0] + '.h'
return 'ppapi/c/' + name
def WriteCopyrightGeneratedTime(self, out):
now = datetime.now()
c = """/* Copyright (c) %s The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Last generated from IDL: %s. */
""" % (now.year, datetime.ctime(now))
out.Write(c)
def GetWrapperMetadataName(self):
return '__%sWrapperInfo' % self.wrapper_prefix
def GenerateHelperFunctions(self, out):
"""Generate helper functions to avoid dependencies on libc.
"""
out.Write("""/* Use local strcmp to avoid dependency on libc. */
static int mystrcmp(const char* s1, const char *s2) {
while((*s1 && *s2) && (*s1++ == *s2++));
return *(--s1) - *(--s2);
}\n
""")
def GenerateFixedFunctions(self, out):
"""Write out the set of constant functions (those that do not depend on
the current Pepper IDL).
"""
out.Write("""
static PPB_GetInterface __real_PPBGetInterface;
static PPP_GetInterface_Type __real_PPPGetInterface;
void __set_real_%(wrapper_prefix)s_PPBGetInterface(PPB_GetInterface real) {
__real_PPBGetInterface = real;
}
void __set_real_%(wrapper_prefix)s_PPPGetInterface(PPP_GetInterface_Type real) {
__real_PPPGetInterface = real;
}
/* Map interface string -> wrapper metadata */
static struct %(wrapper_struct)s *%(wrapper_prefix)sPPBShimIface(
const char *name) {
struct %(wrapper_struct)s **next = s_ppb_wrappers;
while (*next != NULL) {
if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;
++next;
}
return NULL;
}
/* Map interface string -> wrapper metadata */
static struct %(wrapper_struct)s *%(wrapper_prefix)sPPPShimIface(
const char *name) {
struct %(wrapper_struct)s **next = s_ppp_wrappers;
while (*next != NULL) {
if (mystrcmp(name, (*next)->iface_macro) == 0) return *next;
++next;
}
return NULL;
}
const void *__%(wrapper_prefix)s_PPBGetInterface(const char *name) {
struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPBShimIface(name);
if (wrapper == NULL) {
/* We don't have an IDL for this, for some reason. Take our chances. */
return (*__real_PPBGetInterface)(name);
}
/* Initialize the real_iface if it hasn't been. The wrapper depends on it. */
if (wrapper->real_iface == NULL) {
const void *iface = (*__real_PPBGetInterface)(name);
if (NULL == iface) return NULL;
wrapper->real_iface = iface;
}
if (wrapper->wrapped_iface) {
return wrapper->wrapped_iface;
} else {
return wrapper->real_iface;
}
}
const void *__%(wrapper_prefix)s_PPPGetInterface(const char *name) {
struct %(wrapper_struct)s *wrapper = %(wrapper_prefix)sPPPShimIface(name);
if (wrapper == NULL) {
/* We don't have an IDL for this, for some reason. Take our chances. */
return (*__real_PPPGetInterface)(name);
}
/* Initialize the real_iface if it hasn't been. The wrapper depends on it. */
if (wrapper->real_iface == NULL) {
const void *iface = (*__real_PPPGetInterface)(name);
if (NULL == iface) return NULL;
wrapper->real_iface = iface;
}
if (wrapper->wrapped_iface) {
return wrapper->wrapped_iface;
} else {
return wrapper->real_iface;
}
}
""" % { 'wrapper_struct' : self.GetWrapperMetadataName(),
'wrapper_prefix' : self.wrapper_prefix,
} )
############################################################
def InterfaceNeedsWrapper(self, iface, releases):
"""Return true if the interface has ANY methods that need wrapping.
"""
return True
def OwnHeaderFile(self):
"""Return the header file that specifies the API of this wrapper.
We do not generate the header files. """
raise Exception('Child class must implement this')
############################################################
def DetermineInterfaces(self, ast, releases):
"""Get a list of interfaces along with whatever metadata we need.
"""
iface_releases = []
for filenode in ast.GetListOf('File'):
# If this file has errors, skip it
if filenode in self.skip_list:
InfoOut.Log('WrapperGen: Skipping %s due to errors\n' %
filenode.GetName())
continue
file_name = self.GetHeaderName(filenode.GetName())
ifaces = filenode.GetListOf('Interface')
for iface in ifaces:
releases_for_iface = iface.GetUniqueReleases(releases)
for release in releases_for_iface:
version = iface.GetVersion(release)
struct_name = self.cgen.GetStructName(iface, release,
include_version=True)
needs_wrap = self.InterfaceVersionNeedsWrapping(iface, version)
if not needs_wrap:
InfoOut.Log('Interface %s ver %s does not need wrapping' %
(struct_name, version))
iface_releases.append(
Interface(iface, release, version,
struct_name, needs_wrap, file_name))
return iface_releases
def GenerateIncludes(self, iface_releases, out):
"""Generate the list of #include that define the original interfaces.
"""
self.WriteCopyrightGeneratedTime(out)
# First include own header.
out.Write('#include "%s"\n\n' % self.OwnHeaderFile())
# Get typedefs for PPB_GetInterface.
out.Write('#include "%s"\n' % self.GetHeaderName('ppb.h'))
# Get a conservative list of all #includes that are needed,
# whether it requires wrapping or not. We currently depend on the macro
# string for comparison, even when it is not wrapped, to decide when
# to use the original/real interface.
header_files = set()
for iface in iface_releases:
header_files.add(iface.header_file)
for header in sorted(header_files):
out.Write('#include "%s"\n' % header)
out.Write('\n')
def WrapperMethodPrefix(self, iface, release):
return '%s_%s_%s_' % (self.wrapper_prefix, release, iface.GetName())
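  # Example (illustrative, values are hypothetical): with wrapper_prefix
  # 'Pnacl', release 'M14' and an interface named 'PPB_Audio', this returns
  # 'Pnacl_M14_PPB_Audio_'.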
def GetReturnArgs(self, ret_type, args_spec):
if ret_type != 'void':
ret = 'return '
else:
ret = ''
if args_spec:
args = []
for arg in args_spec:
args.append(arg[1])
args = ', '.join(args)
else:
args = ''
return (ret, args)
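  # Example (illustrative): for ret_type 'int32_t' and args_spec
  # [('PP_Instance', 'instance'), ('const char*', 'name')] this returns
  # ('return ', 'instance, name'); for a 'void' return type the first element
  # is the empty string.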
def GenerateWrapperForPPBMethod(self, iface, member):
result = []
func_prefix = self.WrapperMethodPrefix(iface.node, iface.release)
sig = self.cgen.GetSignature(member, iface.release, 'store',
func_prefix, False)
result.append('static %s {\n' % sig)
result.append(' while(1) { /* Not implemented */ } \n')
result.append('}\n')
return result
def GenerateWrapperForPPPMethod(self, iface, member):
result = []
func_prefix = self.WrapperMethodPrefix(iface.node, iface.release)
sig = self.cgen.GetSignature(member, iface.release, 'store',
func_prefix, False)
result.append('static %s {\n' % sig)
result.append(' while(1) { /* Not implemented */ } \n')
result.append('}\n')
return result
def GenerateWrapperForMethods(self, iface_releases, comments=True):
"""Return a string representing the code for each wrapper method
(using a string rather than writing to the file directly for testing.)
"""
result = []
for iface in iface_releases:
if not iface.needs_wrapping:
if comments:
result.append('/* Not generating wrapper methods for %s */\n\n' %
iface.struct_name)
continue
if comments:
result.append('/* Begin wrapper methods for %s */\n\n' %
iface.struct_name)
generator = PPKind.ChoosePPFunc(iface,
self.GenerateWrapperForPPBMethod,
self.GenerateWrapperForPPPMethod)
for member in iface.node.GetListOf('Member'):
# Skip the method if it's not actually in the release.
if not member.InReleases([iface.release]):
continue
result.extend(generator(iface, member))
if comments:
result.append('/* End wrapper methods for %s */\n\n' %
iface.struct_name)
return ''.join(result)
def GenerateWrapperInterfaces(self, iface_releases, out):
for iface in iface_releases:
if not iface.needs_wrapping:
out.Write('/* Not generating wrapper interface for %s */\n\n' %
iface.struct_name)
continue
out.Write('struct %s %s_Wrappers_%s = {\n' % (iface.struct_name,
self.wrapper_prefix,
iface.struct_name))
methods = []
for member in iface.node.GetListOf('Member'):
# Skip the method if it's not actually in the release.
if not member.InReleases([iface.release]):
continue
prefix = self.WrapperMethodPrefix(iface.node, iface.release)
cast = self.cgen.GetSignature(member, iface.release, 'return',
prefix='',
func_as_ptr=True,
ptr_prefix='',
include_name=False)
methods.append(' .%s = (%s)&%s%s' % (member.GetName(),
cast,
prefix,
member.GetName()))
out.Write(' ' + ',\n '.join(methods) + '\n')
out.Write('};\n\n')
def GetWrapperInfoName(self, iface):
return '%s_WrapperInfo_%s' % (self.wrapper_prefix, iface.struct_name)
def GenerateWrapperInfoAndCollection(self, iface_releases, out):
for iface in iface_releases:
iface_macro = self.cgen.GetInterfaceMacro(iface.node, iface.version)
if iface.needs_wrapping:
wrap_iface = '(void *) &%s_Wrappers_%s' % (self.wrapper_prefix,
iface.struct_name)
else:
wrap_iface = 'NULL /* Still need slot for real_iface */'
out.Write("""static struct %s %s = {
.iface_macro = %s,
.wrapped_iface = %s,
.real_iface = NULL
};\n\n""" % (self.GetWrapperMetadataName(),
self.GetWrapperInfoName(iface),
iface_macro,
wrap_iface))
# Now generate NULL terminated arrays of the above wrapper infos.
ppb_wrapper_infos = []
ppp_wrapper_infos = []
for iface in iface_releases:
appender = PPKind.ChoosePPFunc(iface,
ppb_wrapper_infos.append,
ppp_wrapper_infos.append)
appender(' &%s' % self.GetWrapperInfoName(iface))
ppb_wrapper_infos.append(' NULL')
ppp_wrapper_infos.append(' NULL')
out.Write(
'static struct %s *s_ppb_wrappers[] = {\n%s\n};\n\n' %
(self.GetWrapperMetadataName(), ',\n'.join(ppb_wrapper_infos)))
out.Write(
'static struct %s *s_ppp_wrappers[] = {\n%s\n};\n\n' %
(self.GetWrapperMetadataName(), ',\n'.join(ppp_wrapper_infos)))
def DeclareWrapperInfos(self, iface_releases, out):
"""The wrapper methods usually need access to the real_iface, so we must
declare these wrapper infos ahead of time (there is a circular dependency).
"""
out.Write('/* BEGIN Declarations for all Wrapper Infos */\n\n')
for iface in iface_releases:
out.Write('static struct %s %s;\n' %
(self.GetWrapperMetadataName(), self.GetWrapperInfoName(iface)))
out.Write('/* END Declarations for all Wrapper Infos. */\n\n')
def GenerateRange(self, ast, releases, options):
"""Generate shim code for a range of releases.
"""
# Remember to set the output filename before running this.
out_filename = self.output_file
if out_filename is None:
ErrOut.Log('Did not set filename for writing out wrapper\n')
return 1
InfoOut.Log("Generating %s for %s" % (out_filename, self.wrapper_prefix))
out = IDLOutFile(out_filename)
# Get a list of all the interfaces along with metadata.
iface_releases = self.DetermineInterfaces(ast, releases)
# Generate the includes.
self.GenerateIncludes(iface_releases, out)
# Write out static helper functions (mystrcmp).
self.GenerateHelperFunctions(out)
# Declare list of WrapperInfo before actual wrapper methods, since
# they reference each other.
self.DeclareWrapperInfos(iface_releases, out)
# Generate wrapper functions for each wrapped method in the interfaces.
result = self.GenerateWrapperForMethods(iface_releases)
out.Write(result)
# Collect all the wrapper functions into interface structs.
self.GenerateWrapperInterfaces(iface_releases, out)
# Generate a table of the wrapped interface structs that can be looked up.
self.GenerateWrapperInfoAndCollection(iface_releases, out)
# Write out the IDL-invariant functions.
self.GenerateFixedFunctions(out)
out.Close()
return 0
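# Illustrative driver sketch (not part of the original file). A concrete
# subclass is expected to implement OwnHeaderFile() and the per-method
# generators, and would then be driven roughly as follows; the class name,
# constructor arguments and release label are hypothetical:
#
#   gen = PnaclWrapperGen('Pnacl', 'WrapperGen', 'wrapper', 'shim generator')
#   gen.SetOutputFile('pnacl_shim.c')
#   gen.GenerateRelease(ast, 'M14', {})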
| keishi/chromium | ppapi/generators/idl_gen_wrapper.py | Python | bsd-3-clause | 15,759 |
"""Hardware driver related utilities
Everything in this module should rely on /proc or /sys only, no executable calls
"""
# Standard Library
import os
import re
# Lutris Modules
from lutris.util.log import logger
MIN_RECOMMENDED_NVIDIA_DRIVER = 415
def get_nvidia_driver_info():
"""Return information about NVidia drivers"""
version_file_path = "/proc/driver/nvidia/version"
if not os.path.exists(version_file_path):
return
with open(version_file_path, encoding='utf-8') as version_file:
content = version_file.readlines()
nvrm_version = content[0].split(': ')[1].strip().split()
return {
'nvrm': {
'vendor': nvrm_version[0],
'platform': nvrm_version[1],
'arch': nvrm_version[2],
'version': nvrm_version[5],
'date': ' '.join(nvrm_version[6:])
}
}
return
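# Illustrative sketch (not part of the original file): for a version file whose
# first line reads
#   NVRM version: NVIDIA UNIX x86_64 Kernel Module  418.56  Fri Mar 15 12:59:26 CDT 2019
# get_nvidia_driver_info() would return
#   {'nvrm': {'vendor': 'NVIDIA', 'platform': 'UNIX', 'arch': 'x86_64',
#             'version': '418.56', 'date': 'Fri Mar 15 12:59:26 CDT 2019'}}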
def get_nvidia_gpu_ids():
"""Return the list of Nvidia GPUs"""
return os.listdir("/proc/driver/nvidia/gpus")
def get_nvidia_gpu_info(gpu_id):
"""Return details about a GPU"""
with open("/proc/driver/nvidia/gpus/%s/information" % gpu_id, encoding='utf-8') as info_file:
content = info_file.readlines()
infos = {}
for line in content:
key, value = line.split(":", 1)
infos[key] = value.strip()
return infos
def is_nvidia():
"""Return true if the Nvidia drivers are currently in use"""
return os.path.exists("/proc/driver/nvidia")
def get_gpus():
"""Return GPUs connected to the system"""
if not os.path.exists("/sys/class/drm"):
logger.error("No GPU available on this system!")
return
for cardname in os.listdir("/sys/class/drm/"):
if re.match(r"^card\d$", cardname):
yield cardname
def get_gpu_info(card):
"""Return information about a GPU"""
infos = {"DRIVER": "", "PCI_ID": "", "PCI_SUBSYS_ID": ""}
try:
with open("/sys/class/drm/%s/device/uevent" % card, encoding='utf-8') as card_uevent:
content = card_uevent.readlines()
except FileNotFoundError:
logger.error("Unable to read driver information for card %s", card)
return infos
for line in content:
key, value = line.split("=", 1)
infos[key] = value.strip()
return infos
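# Illustrative sketch (not part of the original file): for a uevent file with
# lines such as
#   DRIVER=amdgpu
#   PCI_ID=1002:687F
#   PCI_SUBSYS_ID=1002:0B36
# get_gpu_info() would return
#   {'DRIVER': 'amdgpu', 'PCI_ID': '1002:687F', 'PCI_SUBSYS_ID': '1002:0B36'}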
def is_amd():
"""Return true if the system uses the AMD driver"""
for card in get_gpus():
if get_gpu_info(card)["DRIVER"] == "amdgpu":
return True
return False
def check_driver():
"""Report on the currently running driver"""
if is_nvidia():
driver_info = get_nvidia_driver_info()
# pylint: disable=logging-format-interpolation
logger.info("Using {vendor} drivers {version} for {arch}".format(**driver_info["nvrm"]))
gpus = get_nvidia_gpu_ids()
for gpu_id in gpus:
gpu_info = get_nvidia_gpu_info(gpu_id)
logger.info("GPU: %s", gpu_info.get("Model"))
for card in get_gpus():
# pylint: disable=logging-format-interpolation
logger.info("GPU: {PCI_ID} {PCI_SUBSYS_ID} using {DRIVER} driver".format(**get_gpu_info(card)))
def is_outdated():
if not is_nvidia():
return False
driver_info = get_nvidia_driver_info()
driver_version = driver_info["nvrm"]["version"]
if not driver_version:
logger.error("Failed to get Nvidia version")
return True
major_version = int(driver_version.split(".")[0])
return major_version < MIN_RECOMMENDED_NVIDIA_DRIVER
| lutris/lutris | lutris/util/graphics/drivers.py | Python | gpl-3.0 | 3,558 |
from __future__ import unicode_literals
from decimal import Decimal
import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, RegexValidator
from django.db.models import Manager, Q
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.db import models
from django.utils.text import slugify
from django.utils.translation import pgettext_lazy
from django_prices.models import PriceField
from jsonfield import JSONField
from model_utils.managers import InheritanceManager
from mptt.managers import TreeManager
from mptt.models import MPTTModel
from satchless.item import ItemRange, Item, InsufficientStock
from unidecode import unidecode
from versatileimagefield.fields import VersatileImageField
from .discounts import get_product_discounts
from .fields import WeightField
from saleor.product.utils import get_attributes_display_map
@python_2_unicode_compatible
class Category(MPTTModel):
name = models.CharField(
pgettext_lazy('Category field', 'name'), max_length=128)
slug = models.SlugField(
pgettext_lazy('Category field', 'slug'), max_length=50)
description = models.TextField(
pgettext_lazy('Category field', 'description'), blank=True)
parent = models.ForeignKey(
'self', null=True, blank=True, related_name='children',
verbose_name=pgettext_lazy('Category field', 'parent'))
hidden = models.BooleanField(
pgettext_lazy('Category field', 'hidden'), default=False)
objects = Manager()
tree = TreeManager()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product:category', kwargs={'path': self.get_full_path(),
'category_id': self.id})
def get_full_path(self):
if not self.parent_id:
return self.slug
return '/'.join(
[node.slug for node in self.get_ancestors(include_self=True)])
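    # Example (illustrative): a category with slug 'sneakers' nested under a
    # parent with slug 'shoes' yields 'shoes/sneakers'; a root category simply
    # returns its own slug.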
class Meta:
verbose_name_plural = 'categories'
app_label = 'product'
def set_hidden_descendants(self, hidden):
self.get_descendants().update(hidden=hidden)
class ProductManager(InheritanceManager):
def get_available_products(self):
today = datetime.datetime.today()
return self.get_queryset().filter(
Q(available_on__lte=today) | Q(available_on__isnull=True))
@python_2_unicode_compatible
class Product(models.Model, ItemRange):
name = models.CharField(
pgettext_lazy('Product field', 'name'), max_length=128)
description = models.TextField(
verbose_name=pgettext_lazy('Product field', 'description'))
categories = models.ManyToManyField(
Category, verbose_name=pgettext_lazy('Product field', 'categories'),
related_name='products')
price = PriceField(
pgettext_lazy('Product field', 'price'),
currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2)
weight = WeightField(
pgettext_lazy('Product field', 'weight'), unit=settings.DEFAULT_WEIGHT,
max_digits=6, decimal_places=2)
available_on = models.DateField(
pgettext_lazy('Product field', 'available on'), blank=True, null=True)
attributes = models.ManyToManyField(
'ProductAttribute', related_name='products', blank=True)
objects = ProductManager()
class Meta:
app_label = 'product'
def __iter__(self):
if not hasattr(self, '__variants'):
setattr(self, '__variants', self.variants.all())
return iter(getattr(self, '__variants'))
def __repr__(self):
class_ = type(self)
return '<%s.%s(pk=%r, name=%r)>' % (
class_.__module__, class_.__name__, self.pk, self.name)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('product:details', kwargs={'slug': self.get_slug(),
'product_id': self.id})
def get_slug(self):
return slugify(smart_text(unidecode(self.name)))
def get_formatted_price(self, price):
return "{0} {1}".format(price.gross, price.currency)
def get_price_per_item(self, item, discounts=None, **kwargs):
price = self.price
if price and discounts:
discounts = list(get_product_discounts(self, discounts, **kwargs))
if discounts:
modifier = max(discounts)
price += modifier
return price
def admin_get_price_min(self):
price = self.get_price_range().min_price
return self.get_formatted_price(price)
admin_get_price_min.short_description = pgettext_lazy(
'Product admin page', 'Minimum price')
def admin_get_price_max(self):
price = self.get_price_range().max_price
return self.get_formatted_price(price)
admin_get_price_max.short_description = pgettext_lazy(
'Product admin page', 'Maximum price')
def is_in_stock(self):
return any(variant.is_in_stock() for variant in self)
def get_first_category(self):
for category in self.categories.all():
if not category.hidden:
return category
return None
@python_2_unicode_compatible
class ProductVariant(models.Model, Item):
sku = models.CharField(
pgettext_lazy('Variant field', 'SKU'), max_length=32, unique=True)
name = models.CharField(
pgettext_lazy('Variant field', 'variant name'), max_length=100, blank=True)
price_override = PriceField(
pgettext_lazy('Variant field', 'price override'),
currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2,
blank=True, null=True)
weight_override = WeightField(
pgettext_lazy('Variant field', 'weight override'),
unit=settings.DEFAULT_WEIGHT, max_digits=6, decimal_places=2,
blank=True, null=True)
product = models.ForeignKey(Product, related_name='variants')
attributes = JSONField(pgettext_lazy('Variant field', 'attributes'),
default={})
objects = InheritanceManager()
class Meta:
app_label = 'product'
def __str__(self):
return self.name or self.sku
def get_weight(self):
return self.weight_override or self.product.weight
def check_quantity(self, quantity):
available_quantity = self.get_stock_quantity()
if quantity > available_quantity:
raise InsufficientStock(self)
def get_stock_quantity(self):
return sum([stock.quantity for stock in self.stock.all()])
def get_price_per_item(self, discounts=None, **kwargs):
price = self.price_override or self.product.price
if discounts:
discounts = list(get_product_discounts(self, discounts, **kwargs))
if discounts:
modifier = max(discounts)
price += modifier
return price
def get_absolute_url(self):
slug = self.product.get_slug()
product_id = self.product.id
return reverse('product:details',
kwargs={'slug': slug, 'product_id': product_id})
def as_data(self):
return {
'product_name': str(self),
'product_id': self.product.pk,
'variant_id': self.pk,
'unit_price': str(self.get_price_per_item().gross)}
def is_shipping_required(self):
return True
def is_in_stock(self):
return any([stock_item.quantity > 0 for stock_item in self.stock.all()])
def get_attribute(self, pk):
return self.attributes.get(str(pk))
def display_variant(self, attributes=None):
if attributes is None:
attributes = self.product.attributes.all()
values = get_attributes_display_map(self, attributes).values()
if values:
return ', '.join([smart_text(value) for value in values])
else:
return smart_text(self)
def display_product(self, attributes=None):
return '%s (%s)' % (smart_text(self.product),
self.display_variant(attributes=attributes))
@python_2_unicode_compatible
class Stock(models.Model):
variant = models.ForeignKey(
ProductVariant, related_name='stock',
verbose_name=pgettext_lazy('Stock item field', 'variant'))
location = models.CharField(
pgettext_lazy('Stock item field', 'location'), max_length=100)
quantity = models.IntegerField(
pgettext_lazy('Stock item field', 'quantity'),
validators=[MinValueValidator(0)], default=Decimal(1))
cost_price = PriceField(
pgettext_lazy('Stock item field', 'cost price'),
currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2,
blank=True, null=True)
class Meta:
app_label = 'product'
unique_together = ('variant', 'location')
def __str__(self):
return "%s - %s" % (self.variant.name, self.location)
@python_2_unicode_compatible
class ProductAttribute(models.Model):
name = models.SlugField(
pgettext_lazy('Product attribute field', 'internal name'),
max_length=50, unique=True)
display = models.CharField(
pgettext_lazy('Product attribute field', 'display name'),
max_length=100)
class Meta:
ordering = ['name']
def __str__(self):
return self.display
def get_formfield_name(self):
return slugify('attribute-%s' % self.name)
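    # Example (illustrative): an attribute named 'color' yields the form field
    # name 'attribute-color'.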
def has_values(self):
return self.values.exists()
@python_2_unicode_compatible
class AttributeChoiceValue(models.Model):
display = models.CharField(
pgettext_lazy('Attribute choice value field', 'display name'),
max_length=100)
color = models.CharField(
pgettext_lazy('Attribute choice value field', 'color'),
max_length=7,
validators=[RegexValidator('^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$')],
blank=True)
image = VersatileImageField(
pgettext_lazy('Attribute choice value field', 'image'),
upload_to='attributes', blank=True, null=True)
attribute = models.ForeignKey(ProductAttribute, related_name='values')
def __str__(self):
return self.display
| Drekscott/Motlaesaleor | saleor/product/models/base.py | Python | bsd-3-clause | 10,340 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Winlogon Windows Registry plugin."""
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import winlogon
from tests.parsers.winreg_plugins import test_lib
class WinlogonPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Winlogon Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path: the Windows Registry key path.
time_string: string containing the key last written date and time.
Returns:
A Windows Registry key (instance of dfwinreg.WinRegistryKey).
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
u'Winlogon', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
# Setup Winlogon values.
value_data = u'1'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'AutoAdminLogon', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x01'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'AutoRestartShell', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = u'0 0 0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Background', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'10'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'CachedLogonsCount', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'no'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DebugServerCommand', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u''.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DefaultDomainName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'user'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DefaultUserName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x01'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DisableCAD', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ForceUnlockLogon', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = u''.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'LegalNoticeCaption', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u''.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'LegalNoticeText', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x05'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'PasswordExpiryWarning', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'PowerdownAfterShutdown', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'{A520A1A4-1780-4FF6-BD18-167343C5AF16}'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'PreCreateKnownFolders', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'1'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ReportBootOk', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'explorer.exe'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Shell', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x2b'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ShutdownFlags', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'ShutdownWithoutLogon', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'C:\\Windows\\system32\\userinit.exe'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Userinit', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'SystemPropertiesPerformance.exe/pagefile'.encode(
u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'VMApplet', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = u'0'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'WinStationsDisabled', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
# Setup registered event handlers.
notify = dfwinreg_fake.FakeWinRegistryKey(u'Notify')
registry_key.AddSubkey(notify)
navlogon = dfwinreg_fake.FakeWinRegistryKey(
u'NavLogon', last_written_time=filetime.timestamp)
notify.AddSubkey(navlogon)
value_data = u'NavLogon.dll'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DllName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
navlogon.AddValue(registry_value)
value_data = u'NavLogoffEvent'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Logoff', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
navlogon.AddValue(registry_value)
value_data = u'NavStartShellEvent'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'StartShell', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
navlogon.AddValue(registry_value)
secret_malware = dfwinreg_fake.FakeWinRegistryKey(
u'SecretMalware', last_written_time=filetime.timestamp)
notify.AddSubkey(secret_malware)
value_data = b'\x00\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Asynchronous', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
secret_malware.AddValue(registry_value)
value_data = u'secret_malware.dll'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'DllName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = b'\x00\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Impersonate', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
secret_malware.AddValue(registry_value)
value_data = u'secretEventLock'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Lock', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventLogoff'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Logoff', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventLogon'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Logon', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventShutdown'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Shutdown', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventSmartCardLogonNotify'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'SmartCardLogonNotify', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventStartShell'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'StartShell', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventStartup'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Startup', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventStopScreenSaver'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'StopScreenSaver', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
value_data = u'secretEventUnlock'.encode(u'utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
u'Unlock', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
secret_malware.AddValue(registry_value)
return registry_key
def testProcess(self):
"""Tests the Process function on created key."""
key_path = (
u'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion')
time_string = u'2013-01-30 10:47:57'
registry_key = self._CreateTestKey(key_path, time_string)
plugin_object = winlogon.WinlogonPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin_object)
self.assertEqual(len(storage_writer.events), 14)
    # Because the order in which the subkeys are parsed is not guaranteed, we
    # sort the events.
# TODO: look into this.
event_objects = sorted(
storage_writer.events, key=lambda evt: evt.EqualityString())
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}\\Notify\\NavLogon] '
u'Application: NavLogon '
u'Command: NavLogon.dll '
u'Handler: NavLogoffEvent '
u'Trigger: Logoff').format(key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
event_object = event_objects[13]
expected_timestamp = timelib.Timestamp.CopyFromString(time_string)
self.assertEqual(event_object.timestamp, expected_timestamp)
expected_message = (
u'[{0:s}] '
u'Application: VmApplet '
u'Command: SystemPropertiesPerformance.exe/pagefile '
u'Trigger: Logon').format(key_path)
expected_short_message = u'{0:s}...'.format(expected_message[0:77])
self._TestGetMessageStrings(
event_object, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| dc3-plaso/plaso | tests/parsers/winreg_plugins/winlogon.py | Python | apache-2.0 | 12,249 |
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import unittest
import warnings
from difflib import unified_diff
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, convertExpected
from html5lib import html5parser, treewalkers, treebuilders, constants
def PullDOMAdapter(node):
from xml.dom import Node
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, COMMENT, CHARACTERS
if node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise NotImplementedError("DOCTYPE nodes are not supported by PullDOM")
elif node.nodeType == Node.COMMENT_NODE:
yield COMMENT, node
elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
yield CHARACTERS, node
elif node.nodeType == Node.ELEMENT_NODE:
yield START_ELEMENT, node
for childNode in node.childNodes:
for event in PullDOMAdapter(childNode):
yield event
yield END_ELEMENT, node
else:
raise NotImplementedError("Node type not supported: " + str(node.nodeType))
treeTypes = {
"DOM": {"builder": treebuilders.getTreeBuilder("dom"),
"walker": treewalkers.getTreeWalker("dom")},
"PullDOM": {"builder": treebuilders.getTreeBuilder("dom"),
"adapter": PullDOMAdapter,
"walker": treewalkers.getTreeWalker("pulldom")},
}
# Try whichever etree implementations are available, from a list that is
# "supposed" to work
try:
import xml.etree.ElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['ElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
pass
else:
treeTypes['cElementTree'] = \
{"builder": treebuilders.getTreeBuilder("etree", ElementTree),
"walker": treewalkers.getTreeWalker("etree", ElementTree)}
try:
import lxml.etree as ElementTree # flake8: noqa
except ImportError:
pass
else:
treeTypes['lxml_native'] = \
{"builder": treebuilders.getTreeBuilder("lxml"),
"walker": treewalkers.getTreeWalker("lxml")}
# Try pxdom, a pure-Python DOM implementation, if it is installed
try:
import pxdom
treeTypes['pxdom'] = \
{"builder": treebuilders.getTreeBuilder("dom", pxdom),
"walker": treewalkers.getTreeWalker("dom")}
except ImportError:
pass
try:
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
except ImportError:
pass
else:
def GenshiAdapter(tree):
text = None
for token in treewalkers.getTreeWalker("dom")(tree):
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if text is None:
text = token["data"]
else:
text += token["data"]
elif text is not None:
yield TEXT, text, (None, -1, -1)
text = None
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text is not None:
yield TEXT, text, (None, -1, -1)
treeTypes["genshi"] = \
{"builder": treebuilders.getTreeBuilder("dom"),
"adapter": GenshiAdapter,
"walker": treewalkers.getTreeWalker("genshi")}
def concatenateCharacterTokens(tokens):
charactersToken = None
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
if charactersToken is None:
charactersToken = {"type": "Characters", "data": token["data"]}
else:
charactersToken["data"] += token["data"]
else:
if charactersToken is not None:
yield charactersToken
charactersToken = None
yield token
if charactersToken is not None:
yield charactersToken
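# Illustrative sketch (not part of the original file): two adjacent tokens
# {'type': 'Characters', 'data': 'foo'} and {'type': 'SpaceCharacters',
# 'data': ' '} are merged and yielded as a single token
# {'type': 'Characters', 'data': 'foo '}.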
def convertTokens(tokens):
output = []
indent = 0
for token in concatenateCharacterTokens(tokens):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
if (token["namespace"] and
token["namespace"] != constants.namespaces["html"]):
if token["namespace"] in constants.prefixes:
name = constants.prefixes[token["namespace"]]
else:
name = token["namespace"]
name += " " + token["name"]
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
attrs = token["data"]
if attrs:
# TODO: Remove this if statement, attrs should always exist
for (namespace, name), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
outputname = constants.prefixes[namespace]
else:
outputname = namespace
outputname += " " + name
else:
outputname = name
output.append("%s%s=\"%s\"" % (" " * indent, outputname, value))
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent, token["name"],
token["publicId"],
token["systemId"] and token["systemId"] or ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent, token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type in ("Characters", "SpaceCharacters"):
output.append("%s\"%s\"" % (" " * indent, token["data"]))
else:
pass # TODO: what to do with errors?
return "\n".join(output)
import re
attrlist = re.compile(r"^(\s+)\w+=.*(\n\1\w+=.*)+", re.M)
def sortattrs(x):
lines = x.group(0).split("\n")
lines.sort()
return "\n".join(lines)
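# Illustrative sketch (not part of the original file): `attrlist` matches a run
# of consecutive, equally indented attribute lines in the serialized tree
# output, and attrlist.sub(sortattrs, text) rewrites, e.g., the pair of lines
#   href="a"
#   class="b"
# in alphabetical order so expected and received trees can be compared without
# depending on attribute ordering.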
class TokenTestCase(unittest.TestCase):
def test_all_tokens(self):
expected = [
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'head'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': 'a', 'type': 'Characters'},
{'data': {}, 'type': 'StartTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'b', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'div'},
{'data': 'c', 'type': 'Characters'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'body'},
{'data': {}, 'type': 'EndTag', 'namespace': 'http://www.w3.org/1999/xhtml', 'name': 'html'}
]
for treeName, treeCls in treeTypes.items():
p = html5parser.HTMLParser(tree=treeCls["builder"])
document = p.parse("<html><head></head><body>a<div>b</div>c</body></html>")
document = treeCls.get("adapter", lambda x: x)(document)
output = treeCls["walker"](document)
for expectedToken, outputToken in zip(expected, output):
self.assertEqual(expectedToken, outputToken)
def runTreewalkerTest(innerHTML, input, expected, errors, treeClass):
warnings.resetwarnings()
warnings.simplefilter("error")
try:
p = html5parser.HTMLParser(tree=treeClass["builder"])
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
document = p.parse(input)
except constants.DataLossWarning:
# Ignore testcases we know we don't pass
return
document = treeClass.get("adapter", lambda x: x)(document)
try:
output = convertTokens(treeClass["walker"](document))
output = attrlist.sub(sortattrs, output)
expected = attrlist.sub(sortattrs, convertExpected(expected))
diff = "".join(unified_diff([line + "\n" for line in expected.splitlines()],
[line + "\n" for line in output.splitlines()],
"Expected", "Received"))
assert expected == output, "\n".join([
"", "Input:", input,
"", "Expected:", expected,
"", "Received:", output,
"", "Diff:", diff,
])
except NotImplementedError:
pass # Amnesty for those that confess...
def test_treewalker():
sys.stdout.write('Testing tree walkers ' + " ".join(list(treeTypes.keys())) + "\n")
for treeName, treeCls in treeTypes.items():
files = get_data_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat", "")
if testName in ("template",):
continue
tests = TestData(filename, "data")
for index, test in enumerate(tests):
(input, errors,
innerHTML, expected) = [test[key] for key in ("data", "errors",
"document-fragment",
"document")]
errors = errors.split("\n")
yield runTreewalkerTest, innerHTML, input, expected, errors, treeCls
| vitorio/NaNoGenMo2013 | ficly-scrape/html5lib/tests/test_treewalkers.py | Python | cc0-1.0 | 11,844 |
from django.conf.urls import include, url
from storages.views import add_s, edit_s, ViewStorages
urlpatterns = [
url(r'^$', ViewStorages.as_view(), name='all'),
url(r'^add/', add_s, name='add_s'),
url(r'^edit/', edit_s, name='edit'),
url(r'^api/', include('storages.api.urls')),
]
| sfcl/severcart | storages/urls.py | Python | gpl-2.0 | 298 |
from django.db import models
from django.db.models import *
from django.contrib.auth.models import User
from django.contrib import admin
from django.db.models.signals import post_save
from taggit.managers import TaggableManager
from shared.utils import *
class UserProfile(models.Model):
tab_id=models.IntegerField(primary_key=True)
#username=models.CharField(max_length=20)
#email=models.EmailField(max_length=30)
user = OneToOneField(User)
location=models.CharField(max_length=10)
nickname=models.CharField(max_length=10)# May not needed
#password = models.CharField(max_length=15)
avatar= models.ImageField(upload_to='static/images/profile_image', blank=True)
online_status=models.BooleanField(default=False)# Good!
user_type=models.IntegerField(max_length=1,default=0)
    skills=models.CharField(max_length=100, blank=True)  # to maintain the user profile
posts = IntegerField(default=0)
reply_count=models.IntegerField(default=0)
    role=models.IntegerField()  # to differentiate between user and admin
def __unicode__(self):
        return self.user.username  # the profile itself has no username field
def increment_posts(self):
self.posts += 1
self.save()
def increment_replies(self):
self.reply_count += 1
self.save()
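# Illustrative sketch (not part of the original file): the post_save signal
# imported above is commonly wired up to create a profile whenever a User is
# saved; the field values below are placeholders:
#
#   def create_user_profile(sender, instance, created, **kwargs):
#       if created:
#           UserProfile.objects.create(user=instance, tab_id=0, location='',
#                                      nickname=instance.username, role=0)
#   post_save.connect(create_user_profile, sender=User)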
class Category(models.Model):
category=models.CharField(max_length=20)
tags = TaggableManager()
def __unicode__(self):
return self.category
class Post(models.Model):
title = CharField(max_length=60)
body=models.TextField()
created_date=models.DateTimeField(auto_now_add=True)
#fk
user=models.ForeignKey('UserProfile')
#fk
category=models.ForeignKey('Category')
count=models.IntegerField(default=0)
admin_id=models.IntegerField()#question will need admin's approval
class Meta:
ordering = ["created_date"]
def __unicode__(self):
return self.title
def increment_count(self):
self.count += 1
self.save()
def short(self):
created = self.created_date.strftime("%b %d, %I:%M %p")
        return u"%s - %s\n%s" % (self.user, self.title, created)
def profile_data(self):
        # Post.user is a ForeignKey to UserProfile, which holds the profile data
        p = self.user
return p.posts, p.avatar
class Reply(models.Model):
#fk
title=models.ForeignKey('Post')
body=models.TextField()
#creator = ForeignKey(User, blank=True, null=True)
user=models.ForeignKey(User)
post_date=models.DateTimeField(auto_now_add=True)
file_upload=models.FileField(upload_to='forum/file')
ratings=models.IntegerField(max_length=5,default=0)# there should be a limit within which the rating should be done
admin_approved=models.BooleanField(default=False)
count=models.IntegerField(default=0)
class Meta:
ordering = ["post_date"]
def __unicode__(self):
        return unicode(self.title)  # self.title is a ForeignKey to Post, not a string
def increment_count(self):
self.count += 1
self.save()
def short(self):
created = self.post_date.strftime("%b %d, %I:%M %p")
        return u"%s - %s\n%s" % (self.user, self.title, created)
def profile_data(self):
        # Reply.user is a django.contrib.auth User; UserProfile's OneToOneField
        # gives it the default reverse accessor 'userprofile'
        p = self.user.userprofile
return p.posts, p.avatar
class Comment(models.Model):
#fk
answer=models.ForeignKey(Reply)
text=models.TextField()
created_date=models.DateTimeField(auto_now_add=True)
count=models.IntegerField(default=0)
user=models.ForeignKey(User)
class Meta:
ordering = ["-created_date"]
def __unicode__(self):
        return u'%s' % self.count  # __unicode__ must return a string, count is an integer
class Ticket(models.Model):
user_id=models.ForeignKey(User)
topic_id=models.ForeignKey(Category)
message=models.TextField()
ticket_id=models.IntegerField()
file_uploads=models.FileField(upload_to='tickets/file')
created_date_time=models.DateTimeField(auto_now_add=True)
overdue_date_time=models.DateTimeField(auto_now_add=True)
closed_date_time=models.DateTimeField(auto_now_add=True)
status=models.IntegerField()
reopened_date_time=models.DateTimeField(auto_now_add=True)
topic_priority=models.IntegerField()
duration_for_reply=models.IntegerField()
def __unicode__(self):
        return unicode(self.user_id)  # __unicode__ must return a string
class Tablet_info(models.Model):
rcID=models.IntegerField()
rcName=models.CharField(max_length=100)
start_tab_id=models.IntegerField()
end_tab_id=models.IntegerField()
count=models.IntegerField()
city=models.CharField(max_length=20)
def __unicode__(self):
        return u'%s-%s' % (self.start_tab_id, self.end_tab_id)  # __unicode__ must return a string, not a tuple
| shaswatsunder/aakashlabs-forum | shared/models.py | Python | gpl-3.0 | 4,263 |
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
from __future__ import absolute_import, division, print_function
import errno
import os
import sys
import socket
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import PY3, Configurable, errno_from_exception
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
if PY3:
xrange = range
if ssl is not None:
# Note that the naming of ssl.Purpose is confusing; the purpose
    # of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(
ssl.Purpose.CLIENT_AUTH)
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# See netutil.ssl_options_to_context
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
else:
# Google App Engine
_client_ssl_defaults = dict(cert_reqs=None,
ca_certs=None)
_server_ssl_defaults = {}
# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u'foo'.encode('idna')
# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
u'foo'.encode('latin1')
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
# Default backlog used when calling sock.listen()
_DEFAULT_BACKLOG = 128
def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
backlog=_DEFAULT_BACKLOG, flags=None, reuse_port=False):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
in the list. If your platform doesn't support this option ValueError will
be raised.
"""
if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
raise ValueError("the platform doesn't support SO_REUSEPORT")
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
bound_port = None
for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
0, flags)):
af, socktype, proto, canonname, sockaddr = res
if (sys.platform == 'darwin' and address == 'localhost' and
af == socket.AF_INET6 and sockaddr[3] != 0):
# Mac OS X includes a link-local address fe80::1%lo0 in the
# getaddrinfo results for 'localhost'. However, the firewall
# doesn't understand that this is a local address and will
# prompt for access (often repeatedly, due to an apparent
# bug in its ability to remember granting access to an
# application). Skip these addresses.
continue
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if errno_from_exception(e) == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 and IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and bound_port is not None:
sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
sock.setblocking(0)
sock.bind(sockaddr)
bound_port = sock.getsockname()[1]
sock.listen(backlog)
sockets.append(sock)
return sockets
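# Illustrative usage sketch (not part of the original file): bind on all
# interfaces on port 8888 and hand the resulting sockets to whatever will
# accept connections on them; the TCPServer shown here is just one possible
# consumer:
#
#   sockets = bind_sockets(8888)
#   server = TCPServer()
#   server.add_sockets(sockets)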
if hasattr(socket, 'AF_UNIX'):
def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
except OSError as err:
if errno_from_exception(err) != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
                raise ValueError("File %s exists and is not a socket" % file)
sock.bind(file)
os.chmod(file, mode)
sock.listen(backlog)
return sock
def add_accept_handler(sock, callback):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
be run (``connection`` is a socket object, and ``address`` is the
address of the other end of the connection). Note that this signature
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
A callable is returned which, when called, will remove the `.IOLoop`
event handler and stop processing further incoming connections.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.0
A callable is returned (``None`` was returned before).
"""
io_loop = IOLoop.current()
removed = [False]
def accept_handler(fd, events):
# More connections may come in while we're handling callbacks;
# to prevent starvation of other tasks we must limit the number
# of connections we accept at a time. Ideally we would accept
# up to the number of connections that were waiting when we
# entered this method, but this information is not available
# (and rearranging this method to call accept() as many times
# as possible before running any callbacks would have adverse
# effects on load balancing in multiprocess configurations).
# Instead, we use the (default) listen backlog as a rough
# heuristic for the number of connections we can reasonably
# accept at once.
for i in xrange(_DEFAULT_BACKLOG):
if removed[0]:
# The socket was probably closed
return
try:
connection, address = sock.accept()
except socket.error as e:
# _ERRNO_WOULDBLOCK indicate we have accepted every
# connection that is available.
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
set_close_exec(connection.fileno())
callback(connection, address)
def remove_handler():
io_loop.remove_handler(sock)
removed[0] = True
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
return remove_handler
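# Illustrative usage sketch (not part of the original file): pair
# add_accept_handler with bind_sockets and a connection callback; the
# handle_connection function below is hypothetical:
#
#   def handle_connection(connection, address):
#       ...  # e.g. wrap `connection` in an IOStream and start reading
#
#   for sock in bind_sockets(8888):
#       add_accept_handler(sock, handle_connection)
#   IOLoop.current().start()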
def is_valid_ip(ip):
"""Returns true if the given string is a well-formed IP address.
Supports IPv4 and IPv6.
"""
if not ip or '\x00' in ip:
# getaddrinfo resolves empty strings to localhost, and truncates
# on zero bytes.
return False
try:
res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_NUMERICHOST)
return bool(res)
except socket.gaierror as e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
return True
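# --- Editor's sketch (not part of the original module): ``is_valid_ip``
# accepts literal IPv4/IPv6 addresses and rejects hostnames or malformed
# strings.
def _example_is_valid_ip():
    assert is_valid_ip('127.0.0.1')
    assert is_valid_ip('::1')
    assert not is_valid_ip('localhost')
    assert not is_valid_ip('256.256.256.256')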
class Resolver(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver`
* `tornado.netutil.ThreadedResolver`
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
"""
@classmethod
def configurable_base(cls):
return Resolver
@classmethod
def configurable_default(cls):
return BlockingResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
:raises IOError: if the address cannot be resolved.
.. versionchanged:: 4.4
Standardized all implementations to raise `IOError`.
"""
raise NotImplementedError()
def close(self):
"""Closes the `Resolver`, freeing any resources used.
.. versionadded:: 3.1
"""
pass
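# --- Editor's sketch (not part of the original module): choosing a resolver
# implementation and obtaining an instance. ``resolve`` returns a Future, so
# callers normally ``yield``/``await`` it inside a coroutine.
def _example_configure_resolver():
    Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=4)
    resolver = Resolver()
    # Inside a coroutine one would write, for example:
    #     addrinfo = yield resolver.resolve('localhost', 80, socket.AF_UNSPEC)
    return resolver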
class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
Use this instead of `ThreadedResolver` when you require additional
control over the executor being used.
The executor will be shut down when the resolver is closed unless
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def initialize(self, executor=None, close_executor=True):
self.io_loop = IOLoop.current()
if executor is not None:
self.executor = executor
self.close_executor = close_executor
else:
self.executor = dummy_executor
self.close_executor = False
def close(self):
if self.close_executor:
self.executor.shutdown()
self.executor = None
@run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC):
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
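# --- Editor's sketch (not part of the original module): supplying a custom
# executor that is shared elsewhere and therefore not shut down when the
# resolver is closed.
def _example_executor_resolver():
    from concurrent.futures import ThreadPoolExecutor
    shared_pool = ThreadPoolExecutor(2)
    return ExecutorResolver(executor=shared_pool, close_executor=False)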
class BlockingResolver(ExecutorResolver):
"""Default `Resolver` implementation, using `socket.getaddrinfo`.
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
"""
def initialize(self):
super(BlockingResolver, self).initialize()
class ThreadedResolver(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
Requires the `concurrent.futures` package to be installed
(available in the standard library since Python 3.2,
installable with ``pip install futures`` in older versions).
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
"""
_threadpool = None # type: ignore
_threadpool_pid = None # type: int
def initialize(self, num_threads=10):
threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize(
executor=threadpool, close_executor=False)
@classmethod
def _create_threadpool(cls, num_threads):
pid = os.getpid()
if cls._threadpool_pid != pid:
# Threads cannot survive after a fork, so if our pid isn't what it
# was when we created the pool then delete it.
cls._threadpool = None
if cls._threadpool is None:
from concurrent.futures import ThreadPoolExecutor
cls._threadpool = ThreadPoolExecutor(num_threads)
cls._threadpool_pid = pid
return cls._threadpool
class OverrideResolver(Resolver):
"""Wraps a resolver with a mapping of overrides.
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs.
"""
def initialize(self, resolver, mapping):
self.resolver = resolver
self.mapping = mapping
def close(self):
self.resolver.close()
def resolve(self, host, port, *args, **kwargs):
if (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs)
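# --- Editor's sketch (not part of the original module): an OverrideResolver
# combining a host-only override with a (host, port) override; the host names
# and addresses are placeholders.
def _example_override_resolver():
    overrides = {
        'example.com': '127.0.0.1',                      # every port
        ('api.example.com', 443): ('10.0.0.5', 8443),    # one specific port
    }
    return OverrideResolver(resolver=BlockingResolver(), mapping=overrides)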
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
'cert_reqs', 'ca_certs', 'ciphers'])
def ssl_options_to_context(ssl_options):
"""Try to convert an ``ssl_options`` dictionary to an
`~ssl.SSLContext` object.
The ``ssl_options`` dictionary contains keywords to be passed to
`ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
be used instead. This function converts the dict form to its
`~ssl.SSLContext` equivalent, and may be used when a component which
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, ssl.SSLContext):
return ssl_options
assert isinstance(ssl_options, dict)
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
# Can't use create_default_context since this interface doesn't
# tell us client vs server.
context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options:
context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
if 'cert_reqs' in ssl_options:
context.verify_mode = ssl_options['cert_reqs']
if 'ca_certs' in ssl_options:
context.load_verify_locations(ssl_options['ca_certs'])
if 'ciphers' in ssl_options:
context.set_ciphers(ssl_options['ciphers'])
if hasattr(ssl, 'OP_NO_COMPRESSION'):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant depends on openssl version 1.0.
# TODO: Do we need to do this ourselves or can we trust
# the defaults?
context.options |= ssl.OP_NO_COMPRESSION
return context
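# --- Editor's sketch (not part of the original module): converting a dict of
# ``ssl.wrap_socket`` keywords into an ``SSLContext``. Certificate-related keys
# (``certfile``, ``keyfile``, ``ca_certs``) would be file system paths.
def _example_ssl_options_to_context():
    ssl_options = {
        'ssl_version': ssl.PROTOCOL_SSLv23,
        'cert_reqs': ssl.CERT_NONE,
        'ciphers': 'DEFAULT',
    }
    return ssl_options_to_context(ssl_options)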
def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if ssl.HAS_SNI:
# In python 3.4, wrap_socket only accepts the server_hostname
# argument if HAS_SNI is true.
# TODO: add a unittest (python added server-side SNI support in 3.4)
# In the meantime it can be manually tested with
# python3 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return context.wrap_socket(socket, **kwargs)
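# --- Editor's sketch (not part of the original module): client-side wrapping
# of an already-connected TCP socket, relying on SNI when available. The
# ``hostname`` argument is whatever server name the caller is connecting to.
def _example_ssl_wrap_client(sock, hostname):
    return ssl_wrap_socket(sock, {'cert_reqs': ssl.CERT_NONE},
                           server_hostname=hostname)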
| SuminAndrew/tornado | tornado/netutil.py | Python | apache-2.0 | 19,807 |
# -*- coding: utf-8 -*-
import sys
import os
import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wssdl'
copyright = u'2016, Franklin "Snaipe" Mathieu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The short X.Y version.
version = re.search(r'\d+\.\d+', release).group(0)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html', 'doctrees']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wssdl_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'wssdl.tex', u'wssdl documentation',
u'Franklin "Snaipe" Mathieu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wssdl', u'wssdl documentation',
[u'Franklin "Snaipe" Mathieu'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wssdl', u'wssdl documentation',
u'wssdl', 'wssdl', '',
''),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'wssdl'
epub_author = u'Franklin "Snaipe" Mathieu'
epub_publisher = u'Franklin "Snaipe" Mathieu'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Register the Lua lexer so that ``lua`` code blocks are highlighted.
from sphinx.highlighting import lexers
from pygments.lexers.scripting import LuaLexer
lexers['lua'] = LuaLexer()
| diacritic/wssdl | doc/conf.py | Python | gpl-3.0 | 9,145 |
#!/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='configparserdb',
version='0.1.0',
    description='Python package to access a config-parser-based database',
long_description=readme,
author='Aamir Malik',
author_email='aamirm@gmail.com',
url='https://github.com/aamirmalik/configparserdb',
license=license,
packages=['configparserdb'],
zip_safe=False
)
| aamirmalik/configparserdb | setup.py | Python | gpl-2.0 | 544 |
import pickle
import json
import numpy as np
from clustering_methods import *
import sequenceParser
import patternRepresentation
import similarityMatrix
import cluster_postprocessing
import similarityMatrixFiltering
from file_path_global import *
sequenceParser.runProcess(filepath_sequence_pkl_w_ornament, filepath_pattern_candidates_w_ornament_json)
sequenceParser.runProcess(filepath_sequence_pkl_wo_ornament, filepath_pattern_candidates_wo_ornament_json)
patternRepresentation.runProcess(filepath_pattern_candidates_w_ornament_json,
filepath_pattern_candidates_replication_w_ornament_json,
filepath_pattern_candidates_replication_midinote_w_ornament_json)
patternRepresentation.runProcess(filepath_pattern_candidates_wo_ornament_json,
filepath_pattern_candidates_replication_wo_ornament_json,
filepath_pattern_candidates_replication_midinote_wo_ornament_json)
similarityMatrix.runProcess(filepath_pattern_candidates_w_ornament_json,
filepath_pattern_candidates_replication_midinote_w_ornament_json,
filepath_pattern_index_2_line_index_w_ornament_json,
filepath_pattern_index_2_pattern_candidates_w_ornament_json,
filepath_dissimlarity_matrix_replication_midinote_w_ornament_normalized_pkl)
similarityMatrix.runProcess(filepath_pattern_candidates_wo_ornament_json,
filepath_pattern_candidates_replication_midinote_wo_ornament_json,
filepath_pattern_index_2_line_index_wo_ornament_json,
filepath_pattern_index_2_pattern_candidates_wo_ornament_json,
filepath_dissimlarity_matrix_replication_midinote_wo_ornament_normalized_pkl)
similarityMatrixFiltering.runProcess(filepath_dissimlarity_matrix_replication_midinote_w_ornament_normalized_pkl,
filepath_pattern_index_2_pattern_candidates_w_ornament_json)
similarityMatrixFiltering.runProcess(filepath_dissimlarity_matrix_replication_midinote_wo_ornament_normalized_pkl,
filepath_pattern_index_2_pattern_candidates_wo_ornament_json)
ind_2_pattern = json.load(open('./dissimilarityMatrix/index2Pattern_wo_ornament.json', 'r'))
dissimilarity_matrix = pickle.load(open('./dissimilarityMatrix/dissimilarityMatrixReplicationMidinote_wo_ornament_normalized.pkl','rb'))
similarity_matrix = 1 - dissimilarity_matrix
# for k in range(0, 21):
# g = knnGraph(similarity_matrix, k)
# labels = g.get_labels()
# # g.plot()
# print k, g.get_modularity()
def generate_cluster(g):
labels = g.get_labels()
g.plot()
cluster = []
for kk in range(max(labels)+1):
cluster.append([ind_2_pattern[str(ii)] for ii in range(len(labels)) if labels[ii] == kk])
return cluster
# k = 0
# g = knnGraph(similarity_matrix, k)
# clusters = generate_cluster(g)
# clusters = cluster_postprocessing.runProcess(clusters)
#
# pickle.dump(clusters, open('clusters/clusters_knn0_wo_ornament_normalized.pkl', 'wb'))
k = 5
g = knnGraph(similarity_matrix, k)
clusters = generate_cluster(g)
clusters = cluster_postprocessing.runProcess(clusters)
pickle.dump(clusters, open('clusters/clusters_knn5_wo_ornament_normalized.pkl', 'wb'))
g = weightedGraph(similarity_matrix)
clusters = generate_cluster(g)
clusters = cluster_postprocessing.runProcess(clusters)
pickle.dump(clusters, open('clusters/clusters_weighted_wo_ornament_normalized.pkl', 'wb'))
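# --- Editor's sketch (not part of the original script): the pickled cluster
# files written above can be reloaded later for inspection; the path below is
# one of the files produced by this script.
def _example_load_clusters(path='clusters/clusters_knn5_wo_ornament_normalized.pkl'):
    with open(path, 'rb') as f:
        return pickle.load(f)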
| xavierfav/music-pattern-discovery | clustering_example.py | Python | agpl-3.0 | 3,566 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to 10 and the min slice size to be extremely small,
    # so as to ensure that there will be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
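    # Custom metric: the sum of the element-wise product of predictions and labels.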
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
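# Module-level input function used by FeatureColumnTest below: loads the Boston
# housing dataset and returns (features, labels) as float32 tensors.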
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | Python | apache-2.0 | 57,502 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from collections import defaultdict
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError, RedirectWarning
PROJECT_TASK_READABLE_FIELDS = {
'allow_subtasks',
'allow_timesheets',
'analytic_account_active',
'effective_hours',
'encode_uom_in_days',
'planned_hours',
'progress',
'overtime',
'remaining_hours',
'subtask_effective_hours',
'subtask_planned_hours',
'timesheet_ids',
'total_hours_spent',
}
class Project(models.Model):
_inherit = "project.project"
allow_timesheets = fields.Boolean(
"Timesheets", compute='_compute_allow_timesheets', store=True, readonly=False,
default=True, help="Enable timesheeting on the project.")
analytic_account_id = fields.Many2one(
# note: replaces ['|', ('company_id', '=', False), ('company_id', '=', company_id)]
domain="""[
'|', ('company_id', '=', False), ('company_id', '=', company_id),
('partner_id', '=?', partner_id),
]"""
)
timesheet_ids = fields.One2many('account.analytic.line', 'project_id', 'Associated Timesheets')
timesheet_count = fields.Integer(compute="_compute_timesheet_count")
timesheet_encode_uom_id = fields.Many2one('uom.uom', related='company_id.timesheet_encode_uom_id')
total_timesheet_time = fields.Integer(
compute='_compute_total_timesheet_time',
help="Total number of time (in the proper UoM) recorded in the project, rounded to the unit.")
encode_uom_in_days = fields.Boolean(compute='_compute_encode_uom_in_days')
is_internal_project = fields.Boolean(compute='_compute_is_internal_project', search='_search_is_internal_project')
remaining_hours = fields.Float(compute='_compute_remaining_hours', string='Remaining Invoiced Time', compute_sudo=True)
has_planned_hours_tasks = fields.Boolean(compute='_compute_remaining_hours', compute_sudo=True,
help="True if any of the project's task has a set planned hours")
is_project_overtime = fields.Boolean('Project in Overtime', compute='_compute_remaining_hours', search='_search_is_project_overtime', compute_sudo=True)
def _compute_encode_uom_in_days(self):
self.encode_uom_in_days = self.env.company.timesheet_encode_uom_id == self.env.ref('uom.product_uom_day')
@api.depends('analytic_account_id')
def _compute_allow_timesheets(self):
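        # Disable timesheets on already-saved projects (those with an _origin) that have no analytic account.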
without_account = self.filtered(lambda t: not t.analytic_account_id and t._origin)
without_account.update({'allow_timesheets': False})
@api.depends('company_id')
def _compute_is_internal_project(self):
for project in self:
project.is_internal_project = project == project.company_id.internal_project_id
@api.model
def _search_is_internal_project(self, operator, value):
if not isinstance(value, bool):
raise ValueError('Invalid value: %s' % (value))
if operator not in ['=', '!=']:
raise ValueError('Invalid operator: %s' % (operator))
query = """
SELECT C.internal_project_id
FROM res_company C
WHERE C.internal_project_id IS NOT NULL
"""
if (operator == '=' and value is True) or (operator == '!=' and value is False):
operator_new = 'inselect'
else:
operator_new = 'not inselect'
return [('id', operator_new, (query, ()))]
@api.depends('allow_timesheets', 'task_ids.planned_hours', 'task_ids.remaining_hours')
def _compute_remaining_hours(self):
group_read = self.env['project.task'].read_group(
domain=[('planned_hours', '!=', False), ('project_id', 'in', self.filtered('allow_timesheets').ids),
'|', ('stage_id.fold', '=', False), ('stage_id', '=', False)],
fields=['planned_hours:sum', 'remaining_hours:sum'], groupby='project_id')
group_per_project_id = {group['project_id'][0]: group for group in group_read}
for project in self:
group = group_per_project_id.get(project.id, {})
project.remaining_hours = group.get('remaining_hours', 0)
project.has_planned_hours_tasks = bool(group.get('planned_hours', False))
project.is_project_overtime = group.get('remaining_hours', 0) < 0
@api.model
def _search_is_project_overtime(self, operator, value):
if not isinstance(value, bool):
raise ValueError(_('Invalid value: %s') % value)
if operator not in ['=', '!=']:
raise ValueError(_('Invalid operator: %s') % operator)
query = """
SELECT P.id
FROM project_project P
LEFT JOIN project_task T ON P.id = T.project_id
WHERE T.planned_hours IS NOT NULL
GROUP BY P.id
HAVING SUM(T.remaining_hours) < 0
"""
if (operator == '=' and value is True) or (operator == '!=' and value is False):
operator_new = 'inselect'
else:
operator_new = 'not inselect'
return [('id', operator_new, (query, ()))]
@api.constrains('allow_timesheets', 'analytic_account_id')
def _check_allow_timesheet(self):
for project in self:
if project.allow_timesheets and not project.analytic_account_id:
raise ValidationError(_('You cannot use timesheets without an analytic account.'))
@api.depends('timesheet_ids')
def _compute_total_timesheet_time(self):
timesheets_read_group = self.env['account.analytic.line'].read_group(
[('project_id', 'in', self.ids)],
['project_id', 'unit_amount', 'product_uom_id'],
['project_id', 'product_uom_id'],
lazy=False)
timesheet_time_dict = defaultdict(list)
uom_ids = set(self.timesheet_encode_uom_id.ids)
for result in timesheets_read_group:
uom_id = result['product_uom_id'] and result['product_uom_id'][0]
if uom_id:
uom_ids.add(uom_id)
timesheet_time_dict[result['project_id'][0]].append((uom_id, result['unit_amount']))
uoms_dict = {uom.id: uom for uom in self.env['uom.uom'].browse(uom_ids)}
for project in self:
# Timesheets may be stored in a different unit of measure, so first
# we convert all of them to the reference unit
            # if the timesheet has no product_uom_id, we take the project's timesheet encoding UoM
total_time = sum([
unit_amount * uoms_dict.get(product_uom_id, project.timesheet_encode_uom_id).factor_inv
for product_uom_id, unit_amount in timesheet_time_dict[project.id]
], 0.0)
# Now convert to the proper unit of measure set in the settings
total_time *= project.timesheet_encode_uom_id.factor
project.total_timesheet_time = int(round(total_time))
@api.depends('timesheet_ids')
def _compute_timesheet_count(self):
timesheet_read_group = self.env['account.analytic.line'].read_group(
[('project_id', 'in', self.ids)],
['project_id'],
['project_id']
)
timesheet_project_map = {project_info['project_id'][0]: project_info['project_id_count'] for project_info in timesheet_read_group}
for project in self:
project.timesheet_count = timesheet_project_map.get(project.id, 0)
@api.model_create_multi
def create(self, vals_list):
""" Create an analytic account if project allow timesheet and don't provide one
Note: create it before calling super() to avoid raising the ValidationError from _check_allow_timesheet
"""
defaults = self.default_get(['allow_timesheets', 'analytic_account_id'])
for vals in vals_list:
allow_timesheets = vals.get('allow_timesheets', defaults.get('allow_timesheets'))
analytic_account_id = vals.get('analytic_account_id', defaults.get('analytic_account_id'))
if allow_timesheets and not analytic_account_id:
analytic_account = self._create_analytic_account_from_values(vals)
vals['analytic_account_id'] = analytic_account.id
return super().create(vals_list)
def write(self, values):
        # create the analytic account for projects that enable timesheets but do not have one yet
if values.get('allow_timesheets') and not values.get('analytic_account_id'):
for project in self:
if not project.analytic_account_id:
project._create_analytic_account()
return super(Project, self).write(values)
def name_get(self):
res = super().name_get()
if len(self.env.context.get('allowed_company_ids', [])) <= 1:
return res
name_mapping = dict(res)
for project in self:
if project.is_internal_project:
name_mapping[project.id] = f'{name_mapping[project.id]} - {project.company_id.name}'
return list(name_mapping.items())
@api.model
def _init_data_analytic_account(self):
self.search([('analytic_account_id', '=', False), ('allow_timesheets', '=', True)])._create_analytic_account()
@api.ondelete(at_uninstall=False)
def _unlink_except_contains_entries(self):
"""
        If any of the projects to unlink have timesheet entries, these
        timesheet entries must be unlinked first.
        In this case, a warning message is displayed through a RedirectWarning
        that allows the user to see the timesheet entries to unlink.
"""
projects_with_timesheets = self.filtered(lambda p: p.timesheet_ids)
if projects_with_timesheets:
if len(projects_with_timesheets) > 1:
warning_msg = _("These projects have some timesheet entries referencing them. Before removing these projects, you have to remove these timesheet entries.")
else:
warning_msg = _("This project has some timesheet entries referencing it. Before removing this project, you have to remove these timesheet entries.")
raise RedirectWarning(
warning_msg, self.env.ref('hr_timesheet.timesheet_action_project').id,
_('See timesheet entries'), {'active_ids': projects_with_timesheets.ids})
def action_show_timesheets_by_employee_invoice_type(self):
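        # Open this project's timesheets grouped by employee and invoice type, with the graph view shown first.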
action = self.env["ir.actions.actions"]._for_xml_id("hr_timesheet.timesheet_action_all")
        # Put the graph view first
new_views = []
for view in action['views']:
new_views.insert(0, view) if view[1] == 'graph' else new_views.append(view)
action.update({
'display_name': _("Timesheets"),
'domain': [('project_id', '=', self.id)],
'context': {
'default_project_id': self.id,
'search_default_groupby_employee': True,
'search_default_groupby_timesheet_invoice_type': True
},
'views': new_views
})
return action
def _convert_project_uom_to_timesheet_encode_uom(self, time):
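        # Convert a duration from the project company's time UoM to the current company's timesheet encoding UoM.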
uom_from = self.company_id.project_time_mode_id
uom_to = self.env.company.timesheet_encode_uom_id
return round(uom_from._compute_quantity(time, uom_to, raise_if_failure=False), 2)
# ----------------------------
# Project Updates
# ----------------------------
def _get_stat_buttons(self):
buttons = super(Project, self)._get_stat_buttons()
if self.user_has_groups('hr_timesheet.group_hr_timesheet_user'):
buttons.append({
'icon': 'clock-o',
'text': _('Recorded'),
'number': '%s %s' % (self.total_timesheet_time, self.env.company.timesheet_encode_uom_id.name),
'action_type': 'object',
'action': 'action_show_timesheets_by_employee_invoice_type',
'show': self.allow_timesheets,
'sequence': 6,
})
return buttons
class Task(models.Model):
_name = "project.task"
_inherit = "project.task"
analytic_account_active = fields.Boolean("Active Analytic Account", compute='_compute_analytic_account_active', compute_sudo=True)
allow_timesheets = fields.Boolean("Allow timesheets", related='project_id.allow_timesheets', help="Timesheets can be logged on this task.", readonly=True)
remaining_hours = fields.Float("Remaining Hours", compute='_compute_remaining_hours', store=True, readonly=True, help="Total remaining time, can be re-estimated periodically by the assignee of the task.")
effective_hours = fields.Float("Hours Spent", compute='_compute_effective_hours', compute_sudo=True, store=True, help="Time spent on this task, excluding its sub-tasks.")
total_hours_spent = fields.Float("Total Hours", compute='_compute_total_hours_spent', store=True, help="Time spent on this task, including its sub-tasks.")
progress = fields.Float("Progress", compute='_compute_progress_hours', store=True, group_operator="avg", help="Display progress of current task.")
overtime = fields.Float(compute='_compute_progress_hours', store=True)
subtask_effective_hours = fields.Float("Sub-tasks Hours Spent", compute='_compute_subtask_effective_hours', recursive=True, store=True, help="Time spent on the sub-tasks (and their own sub-tasks) of this task.")
timesheet_ids = fields.One2many('account.analytic.line', 'task_id', 'Timesheets')
encode_uom_in_days = fields.Boolean(compute='_compute_encode_uom_in_days', default=lambda self: self._uom_in_days())
@property
def SELF_READABLE_FIELDS(self):
return super().SELF_READABLE_FIELDS | PROJECT_TASK_READABLE_FIELDS
def _uom_in_days(self):
return self.env.company.timesheet_encode_uom_id == self.env.ref('uom.product_uom_day')
def _compute_encode_uom_in_days(self):
self.encode_uom_in_days = self._uom_in_days()
@api.depends('analytic_account_id.active', 'project_id.analytic_account_id.active')
def _compute_analytic_account_active(self):
""" Overridden in sale_timesheet """
for task in self:
task.analytic_account_active = task._get_task_analytic_account_id().active
@api.depends('timesheet_ids.unit_amount')
def _compute_effective_hours(self):
for task in self:
task.effective_hours = round(sum(task.timesheet_ids.mapped('unit_amount')), 2)
@api.depends('effective_hours', 'subtask_effective_hours', 'planned_hours')
def _compute_progress_hours(self):
for task in self:
if (task.planned_hours > 0.0):
task_total_hours = task.effective_hours + task.subtask_effective_hours
task.overtime = max(task_total_hours - task.planned_hours, 0)
if task_total_hours > task.planned_hours:
task.progress = 100
else:
task.progress = round(100.0 * task_total_hours / task.planned_hours, 2)
else:
task.progress = 0.0
task.overtime = 0
@api.depends('effective_hours', 'subtask_effective_hours', 'planned_hours')
def _compute_remaining_hours(self):
for task in self:
task.remaining_hours = task.planned_hours - task.effective_hours - task.subtask_effective_hours
@api.depends('effective_hours', 'subtask_effective_hours')
def _compute_total_hours_spent(self):
for task in self:
task.total_hours_spent = task.effective_hours + task.subtask_effective_hours
@api.depends('child_ids.effective_hours', 'child_ids.subtask_effective_hours')
def _compute_subtask_effective_hours(self):
for task in self.with_context(active_test=False):
task.subtask_effective_hours = sum(child_task.effective_hours + child_task.subtask_effective_hours for child_task in task.child_ids)
def action_view_subtask_timesheet(self):
self.ensure_one()
tasks = self.with_context(active_test=False)._get_all_subtasks()
return {
'type': 'ir.actions.act_window',
'name': _('Timesheets'),
'res_model': 'account.analytic.line',
'view_mode': 'list,form',
'context': {
'default_project_id': self.project_id.id
},
'domain': [('project_id', '!=', False), ('task_id', 'in', tasks.ids)],
}
def _get_timesheet(self):
        # Overridden in sale_timesheet
return self.timesheet_ids
def write(self, values):
# a timesheet must have an analytic account (and a project)
if 'project_id' in values and not values.get('project_id') and self._get_timesheet():
raise UserError(_('This task must be part of a project because there are some timesheets linked to it.'))
res = super(Task, self).write(values)
if 'project_id' in values:
project = self.env['project.project'].browse(values.get('project_id'))
if project.allow_timesheets:
                # Write the new project_id on all not-yet-invoiced timesheets (if the project allows timesheets)
self._get_timesheet().write({'project_id': values.get('project_id')})
return res
def name_get(self):
if self.env.context.get('hr_timesheet_display_remaining_hours'):
name_mapping = dict(super().name_get())
for task in self:
if task.allow_timesheets and task.planned_hours > 0 and task.encode_uom_in_days:
days_left = _("(%s days remaining)") % task._convert_hours_to_days(task.remaining_hours)
name_mapping[task.id] = name_mapping.get(task.id, '') + u"\u00A0" + days_left
elif task.allow_timesheets and task.planned_hours > 0:
hours, mins = (str(int(duration)).rjust(2, '0') for duration in divmod(abs(task.remaining_hours) * 60, 60))
hours_left = _(
"(%(sign)s%(hours)s:%(minutes)s remaining)",
sign='-' if task.remaining_hours < 0 else '',
hours=hours,
minutes=mins,
)
name_mapping[task.id] = name_mapping.get(task.id, '') + u"\u00A0" + hours_left
return list(name_mapping.items())
return super().name_get()
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
""" Set the correct label for `unit_amount`, depending on company UoM """
result = super(Task, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
# Use of sudo as the portal user doesn't have access to uom
result['arch'] = self.env['account.analytic.line'].sudo()._apply_timesheet_label(result['arch'])
if view_type in ['tree', 'pivot', 'graph'] and self.env.company.timesheet_encode_uom_id == self.env.ref('uom.product_uom_day'):
result['arch'] = self.env['account.analytic.line']._apply_time_label(result['arch'], related_model=self._name)
return result
@api.ondelete(at_uninstall=False)
def _unlink_except_contains_entries(self):
"""
        If any of the tasks to unlink have timesheet entries, these
        timesheet entries must be unlinked first.
        In this case, a warning message is displayed through a RedirectWarning
        that allows the user to see the timesheet entries to unlink.
"""
tasks_with_timesheets = self.filtered(lambda t: t.timesheet_ids)
if tasks_with_timesheets:
if len(tasks_with_timesheets) > 1:
warning_msg = _("These tasks have some timesheet entries referencing them. Before removing these tasks, you have to remove these timesheet entries.")
else:
warning_msg = _("This task has some timesheet entries referencing it. Before removing this task, you have to remove these timesheet entries.")
raise RedirectWarning(
warning_msg, self.env.ref('hr_timesheet.timesheet_action_task').id,
_('See timesheet entries'), {'active_ids': tasks_with_timesheets.ids})
@api.model
def _convert_hours_to_days(self, time):
uom_hour = self.env.ref('uom.product_uom_hour')
uom_day = self.env.ref('uom.product_uom_day')
return round(uom_hour._compute_quantity(time, uom_day, raise_if_failure=False), 2)
| jeremiahyan/odoo | addons/hr_timesheet/models/project.py | Python | gpl-3.0 | 20,630 |
"""Unit tests for the memoryview
XXX We need more tests! Some tests are in test_bytes
"""
import unittest
import test.support
import sys
import gc
import weakref
import array
class AbstractMemoryTests:
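    # Abstract mixin; the concrete test classes supply ro_type/rw_type, getitem_type,
    # itemsize, format, and the _view()/_check_contents() helpers.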
source_bytes = b"abcdef"
@property
def _source(self):
return self.source_bytes
@property
def _types(self):
return filter(None, [self.ro_type, self.rw_type])
def check_getitem_with_type(self, tp):
item = self.getitem_type
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
self.assertEquals(m[0], item(b"a"))
self.assertTrue(isinstance(m[0], bytes), type(m[0]))
self.assertEquals(m[5], item(b"f"))
self.assertEquals(m[-1], item(b"f"))
self.assertEquals(m[-6], item(b"a"))
# Bounds checking
self.assertRaises(IndexError, lambda: m[6])
self.assertRaises(IndexError, lambda: m[-7])
self.assertRaises(IndexError, lambda: m[sys.maxsize])
self.assertRaises(IndexError, lambda: m[-sys.maxsize])
# Type checking
self.assertRaises(TypeError, lambda: m[None])
self.assertRaises(TypeError, lambda: m[0.0])
self.assertRaises(TypeError, lambda: m["a"])
m = None
self.assertEquals(sys.getrefcount(b), oldrefcount)
def test_getitem(self):
for tp in self._types:
self.check_getitem_with_type(tp)
def test_iter(self):
for tp in self._types:
b = tp(self._source)
m = self._view(b)
self.assertEqual(list(m), [m[i] for i in range(len(m))])
def test_setitem_readonly(self):
if not self.ro_type:
return
b = self.ro_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
def setitem(value):
m[0] = value
self.assertRaises(TypeError, setitem, b"a")
self.assertRaises(TypeError, setitem, 65)
self.assertRaises(TypeError, setitem, memoryview(b"a"))
m = None
self.assertEquals(sys.getrefcount(b), oldrefcount)
def test_setitem_writable(self):
if not self.rw_type:
return
tp = self.rw_type
b = self.rw_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
m[0] = tp(b"0")
self._check_contents(tp, b, b"0bcdef")
m[1:3] = tp(b"12")
self._check_contents(tp, b, b"012def")
m[1:1] = tp(b"")
self._check_contents(tp, b, b"012def")
m[:] = tp(b"abcdef")
self._check_contents(tp, b, b"abcdef")
# Overlapping copies of a view into itself
m[0:3] = m[2:5]
self._check_contents(tp, b, b"cdedef")
m[:] = tp(b"abcdef")
m[2:5] = m[0:3]
self._check_contents(tp, b, b"ababcf")
def setitem(key, value):
m[key] = tp(value)
# Bounds checking
self.assertRaises(IndexError, setitem, 6, b"a")
self.assertRaises(IndexError, setitem, -7, b"a")
self.assertRaises(IndexError, setitem, sys.maxsize, b"a")
self.assertRaises(IndexError, setitem, -sys.maxsize, b"a")
# Wrong index/slice types
self.assertRaises(TypeError, setitem, 0.0, b"a")
self.assertRaises(TypeError, setitem, (0,), b"a")
self.assertRaises(TypeError, setitem, "a", b"a")
# Trying to resize the memory object
self.assertRaises(ValueError, setitem, 0, b"")
self.assertRaises(ValueError, setitem, 0, b"ab")
self.assertRaises(ValueError, setitem, slice(1,1), b"a")
self.assertRaises(ValueError, setitem, slice(0,2), b"a")
m = None
self.assertEquals(sys.getrefcount(b), oldrefcount)
def test_tobytes(self):
for tp in self._types:
m = self._view(tp(self._source))
b = m.tobytes()
# This calls self.getitem_type() on each separate byte of b"abcdef"
expected = b"".join(
self.getitem_type(bytes([c])) for c in b"abcdef")
self.assertEquals(b, expected)
self.assertTrue(isinstance(b, bytes), type(b))
def test_tolist(self):
for tp in self._types:
m = self._view(tp(self._source))
l = m.tolist()
self.assertEquals(l, list(b"abcdef"))
def test_compare(self):
# memoryviews can compare for equality with other objects
# having the buffer interface.
for tp in self._types:
m = self._view(tp(self._source))
for tp_comp in self._types:
self.assertTrue(m == tp_comp(b"abcdef"))
self.assertFalse(m != tp_comp(b"abcdef"))
self.assertFalse(m == tp_comp(b"abcde"))
self.assertTrue(m != tp_comp(b"abcde"))
self.assertFalse(m == tp_comp(b"abcde1"))
self.assertTrue(m != tp_comp(b"abcde1"))
self.assertTrue(m == m)
self.assertTrue(m == m[:])
self.assertTrue(m[0:6] == m[:])
self.assertFalse(m[0:5] == m)
# Comparison with objects which don't support the buffer API
self.assertFalse(m == "abcdef")
self.assertTrue(m != "abcdef")
self.assertFalse("abcdef" == m)
self.assertTrue("abcdef" != m)
# Unordered comparisons
for c in (m, b"abcdef"):
self.assertRaises(TypeError, lambda: m < c)
self.assertRaises(TypeError, lambda: c <= m)
self.assertRaises(TypeError, lambda: m >= c)
self.assertRaises(TypeError, lambda: c > m)
def check_attributes_with_type(self, tp):
m = self._view(tp(self._source))
self.assertEquals(m.format, self.format)
self.assertEquals(m.itemsize, self.itemsize)
self.assertEquals(m.ndim, 1)
self.assertEquals(m.shape, (6,))
self.assertEquals(len(m), 6)
self.assertEquals(m.strides, (self.itemsize,))
self.assertEquals(m.suboffsets, None)
return m
def test_attributes_readonly(self):
if not self.ro_type:
return
m = self.check_attributes_with_type(self.ro_type)
self.assertEquals(m.readonly, True)
def test_attributes_writable(self):
if not self.rw_type:
return
m = self.check_attributes_with_type(self.rw_type)
self.assertEquals(m.readonly, False)
def test_getbuffer(self):
# Test PyObject_GetBuffer() on a memoryview object.
for tp in self._types:
b = tp(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
oldviewrefcount = sys.getrefcount(m)
s = str(m, "utf-8")
self._check_contents(tp, b, s.encode("utf-8"))
self.assertEquals(sys.getrefcount(m), oldviewrefcount)
m = None
self.assertEquals(sys.getrefcount(b), oldrefcount)
def test_gc(self):
for tp in self._types:
if not isinstance(tp, type):
# If tp is a factory rather than a plain type, skip
continue
class MySource(tp):
pass
class MyObject:
pass
# Create a reference cycle through a memoryview object
b = MySource(tp(b'abc'))
m = self._view(b)
o = MyObject()
b.m = m
b.o = o
wr = weakref.ref(o)
b = m = o = None
# The cycle must be broken
gc.collect()
self.assertTrue(wr() is None, wr())
# Variations on source objects for the buffer: bytes-like objects, then arrays
# with itemsize > 1.
# NOTE: support for multi-dimensional objects is unimplemented.
class BaseBytesMemoryTests(AbstractMemoryTests):
ro_type = bytes
rw_type = bytearray
getitem_type = bytes
itemsize = 1
format = 'B'
class BaseArrayMemoryTests(AbstractMemoryTests):
ro_type = None
rw_type = lambda self, b: array.array('i', list(b))
getitem_type = lambda self, b: array.array('i', list(b)).tostring()
itemsize = array.array('i').itemsize
format = 'i'
def test_getbuffer(self):
# XXX Test should be adapted for non-byte buffers
pass
def test_tolist(self):
# XXX NotImplementedError: tolist() only supports byte views
pass
# Variations on indirection levels: memoryview, slice of memoryview,
# slice of slice of memoryview.
# This is important to test allocation subtleties.
class BaseMemoryviewTests:
def _view(self, obj):
return memoryview(obj)
def _check_contents(self, tp, obj, contents):
self.assertEquals(obj, tp(contents))
class BaseMemorySliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[1:7]
def _check_contents(self, tp, obj, contents):
self.assertEquals(obj[1:7], tp(contents))
def test_refs(self):
for tp in self._types:
m = memoryview(tp(self._source))
oldrefcount = sys.getrefcount(m)
m[1:2]
self.assertEquals(sys.getrefcount(m), oldrefcount)
class BaseMemorySliceSliceTests:
source_bytes = b"XabcdefY"
def _view(self, obj):
m = memoryview(obj)
return m[:7][1:]
def _check_contents(self, tp, obj, contents):
self.assertEquals(obj[1:7], tp(contents))
# Concrete test classes
class BytesMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseBytesMemoryTests):
def test_constructor(self):
for tp in self._types:
ob = tp(self._source)
self.assertTrue(memoryview(ob))
self.assertTrue(memoryview(object=ob))
self.assertRaises(TypeError, memoryview)
self.assertRaises(TypeError, memoryview, ob, ob)
self.assertRaises(TypeError, memoryview, argument=ob)
self.assertRaises(TypeError, memoryview, ob, argument=True)
class ArrayMemoryviewTest(unittest.TestCase,
BaseMemoryviewTests, BaseArrayMemoryTests):
def test_array_assign(self):
# Issue #4569: segfault when mutating a memoryview with itemsize != 1
a = array.array('i', range(10))
m = memoryview(a)
new_a = array.array('i', range(9, -1, -1))
m[:] = new_a
self.assertEquals(a, new_a)
class BytesMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceTest(unittest.TestCase,
BaseMemorySliceTests, BaseArrayMemoryTests):
pass
class BytesMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseBytesMemoryTests):
pass
class ArrayMemorySliceSliceTest(unittest.TestCase,
BaseMemorySliceSliceTests, BaseArrayMemoryTests):
pass
def test_main():
test.support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| mancoast/CPythonPyc_test | fail/312_test_memoryview.py | Python | gpl-3.0 | 10,981 |
# sql/selectable.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.FromClause` class of SQL expression elements, representing
SQL tables and derived rowsets.
"""
from .elements import ClauseElement, TextClause, ClauseList, \
and_, Grouping, UnaryExpression, literal_column
from .elements import _clone, \
_literal_as_text, _interpret_as_column_or_from, _expand_cloned,\
_select_iterables, _anonymous_label, _clause_element_as_expr,\
_cloned_intersection, _cloned_difference, True_, _only_column_elements
from .base import Immutable, Executable, _generative, \
ColumnCollection, ColumnSet, _from_objects, Generative
from . import type_api
from .. import inspection
from .. import util
from .. import exc
from operator import attrgetter
from . import operators
import operator
from .annotation import Annotated
import itertools
def _interpret_as_from(element):
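    # Coerce plain strings to a TextClause and inspectable objects to their .selectable; anything else raises ArgumentError.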
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, util.string_types):
return TextClause(util.text_type(element))
elif hasattr(insp, "selectable"):
return insp.selectable
raise exc.ArgumentError("FROM expression expected")
def _interpret_as_select(element):
element = _interpret_as_from(element)
if isinstance(element, Alias):
element = element.original
if not isinstance(element, Select):
element = element.select()
return element
def subquery(alias, *args, **kwargs):
"""Return an :class:`.Alias` object derived
from a :class:`.Select`.
    alias
      the alias name
\*args, \**kwargs
all other arguments are delivered to the
:func:`select` function.
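    E.g., a minimal usage sketch assuming a hypothetical ``users`` table::
        stmt = subquery('active_users', [users.c.id], users.c.active == True)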
"""
return Select(*args, **kwargs).alias(alias)
def alias(selectable, name=None, flat=False):
"""Return an :class:`.Alias` object.
An :class:`.Alias` represents any :class:`.FromClause`
with an alternate name assigned within SQL, typically using the ``AS``
clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
Similar functionality is available via the
:meth:`~.FromClause.alias` method
available on all :class:`.FromClause` subclasses.
When an :class:`.Alias` is created from a :class:`.Table` object,
this has the effect of the table being rendered
as ``tablename AS aliasname`` in a SELECT statement.
For :func:`.select` objects, the effect is that of creating a named
subquery, i.e. ``(select ...) AS aliasname``.
The ``name`` parameter is optional, and provides the name
to use in the rendered SQL. If blank, an "anonymous" name
will be deterministically generated at compile time.
Deterministic means the name is guaranteed to be unique against
other constructs used in the same statement, and will also be the
same name for each successive compilation of the same statement
object.
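    E.g., a minimal usage sketch assuming a hypothetical ``user`` table::
        from sqlalchemy import alias, select
        user_alias = alias(user, name='user_2')
        stmt = select([user_alias]).where(user_alias.c.id > 5)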
:param selectable: any :class:`.FromClause` subclass,
such as a table, select statement, etc.
:param name: string name to be assigned as the alias.
If ``None``, a name will be deterministically generated
at compile time.
    :param flat: Will be passed through if the given selectable
is an instance of :class:`.Join` - see :meth:`.Join.alias`
for details.
.. versionadded:: 0.9.0
"""
return selectable.alias(name=name, flat=flat)
class Selectable(ClauseElement):
"""mark a class as being selectable"""
__visit_name__ = 'selectable'
is_selectable = True
@property
def selectable(self):
return self
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
The most common forms of :class:`.FromClause` are the
:class:`.Table` and the :func:`.select` constructs. Key
features common to all :class:`.FromClause` objects include:
* a :attr:`.c` collection, which provides per-name access to a collection
of :class:`.ColumnElement` objects.
* a :attr:`.primary_key` attribute, which is a collection of all those
:class:`.ColumnElement` objects that indicate the ``primary_key`` flag.
* Methods to generate various derivations of a "from" clause, including
:meth:`.FromClause.alias`, :meth:`.FromClause.join`,
:meth:`.FromClause.select`.
"""
__visit_name__ = 'fromclause'
named_with_column = False
_hide_froms = []
_is_join = False
_is_select = False
_is_from_container = False
_textual = False
"""a marker that allows us to easily distinguish a :class:`.TextAsFrom`
or similar object from other kinds of :class:`.FromClause` objects."""
schema = None
"""Define the 'schema' attribute for this :class:`.FromClause`.
    This is typically ``None`` for most objects except :class:`.Table`,
where it is taken as the value of the :paramref:`.Table.schema` argument.
"""
_memoized_property = util.group_expirable_memoized_property(["_columns"])
@util.dependencies("sqlalchemy.sql.functions")
def count(self, functions, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`.FromClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return Select(
[functions.func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
def select(self, whereclause=None, **params):
"""return a SELECT of this :class:`.FromClause`.
.. seealso::
:func:`~.sql.expression.select` - general purpose
method which allows for arbitrary column lists.
"""
return Select([self], whereclause, **params)
def join(self, right, onclause=None, isouter=False):
"""Return a :class:`.Join` from this :class:`.FromClause`
to another :class:`FromClause`.
E.g.::
from sqlalchemy import join
j = user_table.join(address_table,
user_table.c.id == address_table.c.user_id)
stmt = select([user_table]).select_from(j)
would emit SQL along the lines of::
SELECT user.id, user.name FROM user
JOIN address ON user.id = address.user_id
:param right: the right side of the join; this is any :class:`.FromClause`
object such as a :class:`.Table` object, and may also be a selectable-compatible
object such as an ORM-mapped class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`.FromClause.join` will attempt to
join the two tables based on a foreign key relationship.
:param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
.. seealso::
:func:`.join` - standalone function
:class:`.Join` - the type of object produced
"""
return Join(self, right, onclause, isouter)
def outerjoin(self, right, onclause=None):
"""Return a :class:`.Join` from this :class:`.FromClause`
to another :class:`FromClause`, with the "isouter" flag set to
True.
E.g.::
from sqlalchemy import outerjoin
j = user_table.outerjoin(address_table,
user_table.c.id == address_table.c.user_id)
The above is equivalent to::
j = user_table.join(address_table,
user_table.c.id == address_table.c.user_id, isouter=True)
:param right: the right side of the join; this is any :class:`.FromClause`
object such as a :class:`.Table` object, and may also be a selectable-compatible
object such as an ORM-mapped class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`.FromClause.join` will attempt to
join the two tables based on a foreign key relationship.
.. seealso::
:meth:`.FromClause.join`
:class:`.Join`
"""
return Join(self, right, onclause, True)
def alias(self, name=None, flat=False):
"""return an alias of this :class:`.FromClause`.
This is shorthand for calling::
from sqlalchemy import alias
a = alias(self, name=name)
See :func:`~.expression.alias` for details.
"""
return Alias(self, name)
def is_derived_from(self, fromclause):
"""Return True if this FromClause is 'derived' from the given
FromClause.
An example would be an Alias of a Table is derived from that Table.
"""
# this is essentially an "identity" check in the base class.
# Other constructs override this to traverse through
# contained elements.
return fromclause in self._cloned_set
def _is_lexical_equivalent(self, other):
"""Return True if this FromClause and the other represent
the same lexical identity.
This tests if either one is a copy of the other, or
if they are the same via annotation identity.
"""
return self._cloned_set.intersection(other._cloned_set)
@util.dependencies("sqlalchemy.sql.util")
def replace_selectable(self, sqlutil, old, alias):
"""replace all occurrences of FromClause 'old' with the given Alias
object, returning a copy of this :class:`.FromClause`.
"""
return sqlutil.ClauseAdapter(alias).traverse(self)
def correspond_on_equivalents(self, column, equivalents):
"""Return corresponding_column for the given column, or if None
search for a match in the given dictionary.
"""
col = self.corresponding_column(column, require_embedded=True)
if col is None and col in equivalents:
for equiv in equivalents[col]:
nc = self.corresponding_column(equiv, require_embedded=True)
if nc:
return nc
return col
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`.ColumnElement`, return the exported
:class:`.ColumnElement` object from this :class:`.Selectable`
which corresponds to that original
:class:`~sqlalchemy.schema.Column` via a common ancestor
column.
:param column: the target :class:`.ColumnElement` to be matched
:param require_embedded: only return corresponding columns for
the given :class:`.ColumnElement`, if the given :class:`.ColumnElement`
is actually present within a sub-element
of this :class:`.FromClause`. Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`.FromClause`.
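        E.g., a minimal sketch assuming a hypothetical ``user`` table::
            user_alias = user.alias()
            user_alias.corresponding_column(user.c.id) is user_alias.c.id  # True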
"""
def embedded(expanded_proxy_set, target_set):
for t in target_set.difference(expanded_proxy_set):
if not set(_expand_cloned([t])
).intersection(expanded_proxy_set):
return False
return True
# don't dig around if the column is locally present
if self.c.contains_column(column):
return column
col, intersect = None, None
target_set = column.proxy_set
cols = self.c._all_columns
for c in cols:
expanded_proxy_set = set(_expand_cloned(c.proxy_set))
i = target_set.intersection(expanded_proxy_set)
if i and (not require_embedded
or embedded(expanded_proxy_set, target_set)):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than
# 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
col_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
col.proxy_set if sc.shares_lineage(column)])
c_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
c.proxy_set if sc.shares_lineage(column)])
if c_distance < col_distance:
col, intersect = c, i
return col
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, 'name', self.__class__.__name__ + " object")
def _reset_exported(self):
"""delete memoized collections when a FromClause is cloned."""
self._memoized_property.expire_instance(self)
@_memoized_property
def columns(self):
"""A named-based collection of :class:`.ColumnElement` objects
maintained by this :class:`.FromClause`.
The :attr:`.columns`, or :attr:`.c` collection, is the gateway
to the construction of SQL expressions using table-bound or
other selectable-bound columns::
select([mytable]).where(mytable.c.somecolumn == 5)
"""
if '_columns' not in self.__dict__:
self._init_collections()
self._populate_column_collection()
return self._columns.as_immutable()
@_memoized_property
def primary_key(self):
"""Return the collection of Column objects which comprise the
primary key of this FromClause."""
self._init_collections()
self._populate_column_collection()
return self.primary_key
@_memoized_property
def foreign_keys(self):
"""Return the collection of ForeignKey objects which this
FromClause references."""
self._init_collections()
self._populate_column_collection()
return self.foreign_keys
c = property(attrgetter('columns'),
doc="An alias for the :attr:`.columns` attribute.")
_select_iterable = property(attrgetter('columns'))
def _init_collections(self):
assert '_columns' not in self.__dict__
assert 'primary_key' not in self.__dict__
assert 'foreign_keys' not in self.__dict__
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
@property
def _cols_populated(self):
return '_columns' in self.__dict__
def _populate_column_collection(self):
"""Called on subclasses to establish the .c collection.
Each implementation has a different way of establishing
this collection.
"""
def _refresh_for_new_column(self, column):
"""Given a column added to the .c collection of an underlying
selectable, produce the local version of that column, assuming this
selectable ultimately should proxy this column.
this is used to "ping" a derived selectable to add a new column
to its .c. collection when a Column has been added to one of the
Table objects it ultimtely derives from.
If the given selectable hasn't populated it's .c. collection yet,
it should at least pass on the message to the contained selectables,
but it will return None.
This method is currently used by Declarative to allow Table
columns to be added to a partially constructed inheritance
mapping that may have already produced joins. The method
isn't public right now, as the full span of implications
and/or caveats aren't yet clear.
It's also possible that this functionality could be invoked by
default via an event, which would require that
selectables maintain a weak referencing collection of all
derivations.
"""
if not self._cols_populated:
return None
elif column.key in self.columns and self.columns[column.key] is column:
return column
else:
return None
class Join(FromClause):
"""represent a ``JOIN`` construct between two :class:`.FromClause`
elements.
The public constructor function for :class:`.Join` is the module-level
:func:`.join()` function, as well as the :meth:`.FromClause.join` method
of any :class:`.FromClause` (e.g. such as :class:`.Table`).
.. seealso::
:func:`.join`
:meth:`.FromClause.join`
"""
__visit_name__ = 'join'
_is_join = True
def __init__(self, left, right, onclause=None, isouter=False):
"""Construct a new :class:`.Join`.
The usual entrypoint here is the :func:`~.expression.join`
function or the :meth:`.FromClause.join` method of any
:class:`.FromClause` object.
"""
self.left = _interpret_as_from(left)
self.right = _interpret_as_from(right).self_group()
if onclause is None:
self.onclause = self._match_primaries(self.left, self.right)
else:
self.onclause = onclause
self.isouter = isouter
@classmethod
def _create_outerjoin(cls, left, right, onclause=None):
"""Return an ``OUTER JOIN`` clause element.
The returned object is an instance of :class:`.Join`.
Similar functionality is also available via the
:meth:`~.FromClause.outerjoin()` method on any
:class:`.FromClause`.
:param left: The left side of the join.
:param right: The right side of the join.
:param onclause: Optional criterion for the ``ON`` clause, is
derived from foreign key relationships established between
left and right otherwise.
To chain joins together, use the :meth:`.FromClause.join` or
:meth:`.FromClause.outerjoin` methods on the resulting
:class:`.Join` object.
"""
return cls(left, right, onclause, isouter=True)
@classmethod
def _create_join(cls, left, right, onclause=None, isouter=False):
"""Produce a :class:`.Join` object, given two :class:`.FromClause`
expressions.
E.g.::
j = join(user_table, address_table, user_table.c.id == address_table.c.user_id)
stmt = select([user_table]).select_from(j)
would emit SQL along the lines of::
SELECT user.id, user.name FROM user
JOIN address ON user.id = address.user_id
Similar functionality is available given any :class:`.FromClause` object
(e.g. such as a :class:`.Table`) using the :meth:`.FromClause.join`
method.
:param left: The left side of the join.
:param right: the right side of the join; this is any :class:`.FromClause`
object such as a :class:`.Table` object, and may also be a selectable-compatible
object such as an ORM-mapped class.
:param onclause: a SQL expression representing the ON clause of the
join. If left at ``None``, :meth:`.FromClause.join` will attempt to
join the two tables based on a foreign key relationship.
:param isouter: if True, render a LEFT OUTER JOIN, instead of JOIN.
.. seealso::
:meth:`.FromClause.join` - method form, based on a given left side
:class:`.Join` - the type of object produced
"""
return cls(left, right, onclause, isouter)
@property
def description(self):
return "Join object on %s(%d) and %s(%d)" % (
self.left.description,
id(self.left),
self.right.description,
id(self.right))
def is_derived_from(self, fromclause):
return fromclause is self or \
self.left.is_derived_from(fromclause) or \
self.right.is_derived_from(fromclause)
def self_group(self, against=None):
return FromGrouping(self)
@util.dependencies("sqlalchemy.sql.util")
def _populate_column_collection(self, sqlutil):
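        # Columns from both sides are keyed by their "<tablename>_<columnname>" label;
        # the join's primary key is the set of primary key columns reduced against the ON clause.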
columns = [c for c in self.left.columns] + \
[c for c in self.right.columns]
self.primary_key.extend(sqlutil.reduce_columns(
(c for c in columns if c.primary_key), self.onclause))
self._columns.update((col._label, col) for col in columns)
self.foreign_keys.update(itertools.chain(
*[col.foreign_keys for col in columns]))
def _refresh_for_new_column(self, column):
col = self.left._refresh_for_new_column(column)
if col is None:
col = self.right._refresh_for_new_column(column)
if col is not None:
if self._cols_populated:
self._columns[col._label] = col
self.foreign_keys.add(col)
if col.primary_key:
self.primary_key.add(col)
return col
return None
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
self.onclause = clone(self.onclause, **kw)
def get_children(self, **kwargs):
return self.left, self.right, self.onclause
def _match_primaries(self, left, right):
if isinstance(left, Join):
left_right = left.right
else:
left_right = None
return self._join_condition(left, right, a_subset=left_right)
@classmethod
def _join_condition(cls, a, b, ignore_nonexistent_tables=False,
a_subset=None,
consider_as_foreign_keys=None):
"""create a join condition between two tables or selectables.
e.g.::
join_condition(tablea, tableb)
would produce an expression along the lines of::
tablea.c.id==tableb.c.tablea_id
The join is determined based on the foreign key relationships
between the two selectables. If there are multiple ways
to join, or no way to join, an error is raised.
:param ignore_nonexistent_tables: Deprecated - this
flag is no longer used. Only resolution errors regarding
the two given tables are propagated.
:param a_subset: An optional expression that is a sub-component
of ``a``. An attempt will be made to join to just this sub-component
first before looking at the full ``a`` construct, and if found
will be successful even if there are other ways to join to ``a``.
This allows the "right side" of a join to be passed thereby
providing a "natural join".
"""
crit = []
constraints = set()
for left in (a_subset, a):
if left is None:
continue
for fk in sorted(
b.foreign_keys,
key=lambda fk: fk.parent._creation_order):
if consider_as_foreign_keys is not None and \
fk.parent not in consider_as_foreign_keys:
continue
try:
col = fk.get_referent(left)
except exc.NoReferenceError as nrte:
if nrte.table_name == left.name:
raise
else:
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if left is not b:
for fk in sorted(
left.foreign_keys,
key=lambda fk: fk.parent._creation_order):
if consider_as_foreign_keys is not None and \
fk.parent not in consider_as_foreign_keys:
continue
try:
col = fk.get_referent(b)
except exc.NoReferenceError as nrte:
if nrte.table_name == b.name:
raise
else:
# this is totally covered. can't get
# coverage to mark it.
continue
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if crit:
break
if len(crit) == 0:
if isinstance(b, FromGrouping):
hint = " Perhaps you meant to convert the right side to a "\
"subquery using alias()?"
else:
hint = ""
raise exc.NoForeignKeysError(
"Can't find any foreign key relationships "
"between '%s' and '%s'.%s" % (a.description, b.description, hint))
elif len(constraints) > 1:
raise exc.AmbiguousForeignKeysError(
"Can't determine join between '%s' and '%s'; "
"tables have more than one foreign key "
"constraint relationship between them. "
"Please specify the 'onclause' of this "
"join explicitly." % (a.description, b.description))
elif len(crit) == 1:
return (crit[0])
else:
return and_(*crit)
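    # Illustrative sketch (not part of the library): how the foreign-key
    # driven condition derived above behaves for two simple tables.  The
    # table and column names here are hypothetical.
    #
    #     from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    #
    #     metadata = MetaData()
    #     user = Table('user', metadata,
    #                  Column('id', Integer, primary_key=True))
    #     address = Table('address', metadata,
    #                     Column('id', Integer, primary_key=True),
    #                     Column('user_id', Integer, ForeignKey('user.id')))
    #
    #     # equivalent to user.join(address) with no explicit onclause;
    #     # _join_condition() locates the single foreign key and produces
    #     # user.id = address.user_id
    #     print(user.join(address))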
def select(self, whereclause=None, **kwargs):
"""Create a :class:`.Select` from this :class:`.Join`.
The equivalent long-hand form, given a :class:`.Join` object
``j``, is::
from sqlalchemy import select
j = select([j.left, j.right], **kw).\\
where(whereclause).\\
select_from(j)
:param whereclause: the WHERE criterion that will be sent to
the :func:`select()` function
:param \**kwargs: all other kwargs are sent to the
underlying :func:`select()` function.
"""
collist = [self.left, self.right]
return Select(collist, whereclause, from_obj=[self], **kwargs)
@property
def bind(self):
return self.left.bind or self.right.bind
@util.dependencies("sqlalchemy.sql.util")
def alias(self, sqlutil, name=None, flat=False):
"""return an alias of this :class:`.Join`.
The default behavior here is to first produce a SELECT
construct from this :class:`.Join`, then to produce a
:class:`.Alias` from that. So given a join of the form::
j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
The JOIN by itself would look like::
table_a JOIN table_b ON table_a.id = table_b.a_id
Whereas the alias of the above, ``j.alias()``, would in a
SELECT context look like::
(SELECT table_a.id AS table_a_id, table_b.id AS table_b_id,
table_b.a_id AS table_b_a_id
FROM table_a
JOIN table_b ON table_a.id = table_b.a_id) AS anon_1
The equivalent long-hand form, given a :class:`.Join` object
``j``, is::
from sqlalchemy import select, alias
j = alias(
select([j.left, j.right]).\\
select_from(j).\\
with_labels(True).\\
correlate(False),
name=name
)
The selectable produced by :meth:`.Join.alias` features the same
columns as that of the two individual selectables presented under
a single name - the individual columns are "auto-labeled", meaning
the ``.c.`` collection of the resulting :class:`.Alias` represents
        the names of the individual columns using a ``<tablename>_<columnname>``
scheme::
j.c.table_a_id
j.c.table_b_a_id
:meth:`.Join.alias` also features an alternate
option for aliasing joins which produces no enclosing SELECT and
does not normally apply labels to the column names. The
``flat=True`` option will call :meth:`.FromClause.alias`
against the left and right sides individually.
Using this option, no new ``SELECT`` is produced;
we instead, from a construct as below::
j = table_a.join(table_b, table_a.c.id == table_b.c.a_id)
j = j.alias(flat=True)
we get a result like this::
table_a AS table_a_1 JOIN table_b AS table_b_1 ON
table_a_1.id = table_b_1.a_id
The ``flat=True`` argument is also propagated to the contained
selectables, so that a composite join such as::
j = table_a.join(
table_b.join(table_c,
table_b.c.id == table_c.c.b_id),
table_b.c.a_id == table_a.c.id
).alias(flat=True)
Will produce an expression like::
table_a AS table_a_1 JOIN (
table_b AS table_b_1 JOIN table_c AS table_c_1
ON table_b_1.id = table_c_1.b_id
) ON table_a_1.id = table_b_1.a_id
The standalone :func:`~.expression.alias` function as well as the
base :meth:`.FromClause.alias` method also support the ``flat=True``
argument as a no-op, so that the argument can be passed to the
``alias()`` method of any selectable.
.. versionadded:: 0.9.0 Added the ``flat=True`` option to create
"aliases" of joins without enclosing inside of a SELECT
subquery.
:param name: name given to the alias.
:param flat: if True, produce an alias of the left and right
sides of this :class:`.Join` and return the join of those
        two selectables.  This produces a join expression that does not
include an enclosing SELECT.
.. versionadded:: 0.9.0
.. seealso::
:func:`~.expression.alias`
"""
if flat:
assert name is None, "Can't send name argument with flat"
left_a, right_a = self.left.alias(flat=True), \
self.right.alias(flat=True)
adapter = sqlutil.ClauseAdapter(left_a).\
chain(sqlutil.ClauseAdapter(right_a))
return left_a.join(right_a,
adapter.traverse(self.onclause), isouter=self.isouter)
else:
return self.select(use_labels=True, correlate=False).alias(name)
@property
def _hide_froms(self):
return itertools.chain(*[_from_objects(x.left, x.right)
for x in self._cloned_set])
@property
def _from_objects(self):
return [self] + \
self.onclause._from_objects + \
self.left._from_objects + \
self.right._from_objects
class Alias(FromClause):
"""Represents an table or selectable alias (AS).
Represents an alias, as typically applied to any table or
sub-select within a SQL statement using the ``AS`` keyword (or
without the keyword on certain databases such as Oracle).
This object is constructed from the :func:`~.expression.alias` module level
function as well as the :meth:`.FromClause.alias` method available on all
:class:`.FromClause` subclasses.
"""
__visit_name__ = 'alias'
named_with_column = True
_is_from_container = True
def __init__(self, selectable, name=None):
baseselectable = selectable
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
self.supports_execution = baseselectable.supports_execution
if self.supports_execution:
self._execution_options = baseselectable._execution_options
self.element = selectable
if name is None:
if self.original.named_with_column:
name = getattr(self.original, 'name', None)
name = _anonymous_label('%%(%d %s)s' % (id(self), name
or 'anon'))
self.name = name
@property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
def as_scalar(self):
try:
return self.element.as_scalar()
except AttributeError:
raise AttributeError("Element %s does not support "
"'as_scalar()'" % self.element)
def is_derived_from(self, fromclause):
if fromclause in self._cloned_set:
return True
return self.element.is_derived_from(fromclause)
def _populate_column_collection(self):
for col in self.element.columns._all_columns:
col._make_proxy(self)
def _refresh_for_new_column(self, column):
col = self.element._refresh_for_new_column(column)
if col is not None:
if not self._cols_populated:
return None
else:
return col._make_proxy(self)
else:
return None
def _copy_internals(self, clone=_clone, **kw):
# don't apply anything to an aliased Table
# for now. May want to drive this from
# the given **kw.
if isinstance(self.element, TableClause):
return
self._reset_exported()
self.element = clone(self.element, **kw)
baseselectable = self.element
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
def get_children(self, column_collections=True, **kw):
if column_collections:
for c in self.c:
yield c
yield self.element
@property
def _from_objects(self):
return [self]
@property
def bind(self):
return self.element.bind
class CTE(Alias):
"""Represent a Common Table Expression.
The :class:`.CTE` object is obtained using the
:meth:`.SelectBase.cte` method from any selectable.
See that method for complete examples.
.. versionadded:: 0.7.6
"""
__visit_name__ = 'cte'
def __init__(self, selectable,
name=None,
recursive=False,
_cte_alias=None,
_restates=frozenset()):
self.recursive = recursive
self._cte_alias = _cte_alias
self._restates = _restates
super(CTE, self).__init__(selectable, name=name)
def alias(self, name=None, flat=False):
return CTE(
self.original,
name=name,
recursive=self.recursive,
_cte_alias=self,
)
def union(self, other):
return CTE(
self.original.union(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self])
)
def union_all(self, other):
return CTE(
self.original.union_all(other),
name=self.name,
recursive=self.recursive,
_restates=self._restates.union([self])
)
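# Illustrative sketch (not part of the library): obtaining a CTE from a
# select() and drawing upon it in an enclosing statement; the table and
# column names are hypothetical.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, String, select, func
#
#     metadata = MetaData()
#     orders = Table('orders', metadata,
#                    Column('region', String),
#                    Column('amount', Integer))
#
#     regional = select([orders.c.region,
#                        func.sum(orders.c.amount).label('total')])
#     regional = regional.group_by(orders.c.region).cte('regional')
#
#     # "regional" renders once inside a WITH clause; the outer SELECT
#     # refers to it by name
#     stmt = select([regional.c.region]).where(regional.c.total > 100)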
class FromGrouping(FromClause):
"""Represent a grouping of a FROM clause"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
def _init_collections(self):
pass
@property
def columns(self):
return self.element.columns
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def is_derived_from(self, element):
return self.element.is_derived_from(element)
def alias(self, **kw):
return FromGrouping(self.element.alias(**kw))
@property
def _hide_froms(self):
return self.element._hide_froms
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element}
def __setstate__(self, state):
self.element = state['element']
class TableClause(Immutable, FromClause):
"""Represents a minimal "table" construct.
This is a lightweight table object that has only a name and a
collection of columns, which are typically produced
by the :func:`.expression.column` function::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
The :class:`.TableClause` construct serves as the base for
the more commonly used :class:`~.schema.Table` object, providing
the usual set of :class:`~.expression.FromClause` services including
the ``.c.`` collection and statement generation methods.
It does **not** provide all the additional schema-level services
of :class:`~.schema.Table`, including constraints, references to other
tables, or support for :class:`.MetaData`-level services. It's useful
on its own as an ad-hoc construct used to generate quick SQL
statements when a more fully fledged :class:`~.schema.Table`
is not on hand.
"""
__visit_name__ = 'table'
named_with_column = True
implicit_returning = False
""":class:`.TableClause` doesn't support having a primary key or column
-level defaults, so implicit returning doesn't apply."""
_autoincrement_column = None
"""No PK or default support so no autoincrement column."""
def __init__(self, name, *columns):
"""Produce a new :class:`.TableClause`.
The object returned is an instance of :class:`.TableClause`, which
represents the "syntactical" portion of the schema-level
:class:`~.schema.Table` object.
It may be used to construct lightweight table constructs.
Note that the :func:`.expression.table` function is not part of
the ``sqlalchemy`` namespace. It must be imported from the
``sql`` package::
from sqlalchemy.sql import table, column
:param name: Name of the table.
:param columns: A collection of :func:`.expression.column` constructs.
"""
super(TableClause, self).__init__()
self.name = self.fullname = name
self._columns = ColumnCollection()
self.primary_key = ColumnSet()
self.foreign_keys = set()
for c in columns:
self.append_column(c)
def _init_collections(self):
pass
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
def append_column(self, c):
self._columns[c.key] = c
c.table = self
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
@util.dependencies("sqlalchemy.sql.functions")
def count(self, functions, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`.TableClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return Select(
[functions.func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
@util.dependencies("sqlalchemy.sql.dml")
def insert(self, dml, values=None, inline=False, **kwargs):
"""Generate an :func:`.insert` construct against this
:class:`.TableClause`.
E.g.::
table.insert().values(name='foo')
See :func:`.insert` for argument and usage information.
"""
return dml.Insert(self, values=values, inline=inline, **kwargs)
@util.dependencies("sqlalchemy.sql.dml")
def update(self, dml, whereclause=None, values=None, inline=False, **kwargs):
"""Generate an :func:`.update` construct against this
:class:`.TableClause`.
E.g.::
table.update().where(table.c.id==7).values(name='foo')
See :func:`.update` for argument and usage information.
"""
return dml.Update(self, whereclause=whereclause,
values=values, inline=inline, **kwargs)
@util.dependencies("sqlalchemy.sql.dml")
def delete(self, dml, whereclause=None, **kwargs):
"""Generate a :func:`.delete` construct against this
:class:`.TableClause`.
E.g.::
table.delete().where(table.c.id==7)
See :func:`.delete` for argument and usage information.
"""
return dml.Delete(self, whereclause, **kwargs)
@property
def _from_objects(self):
return [self]
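# Illustrative sketch (not part of the library): the DML helpers defined on
# :class:`.TableClause` above generate statements without any schema-level
# metadata; the table and column names are hypothetical.
#
#     from sqlalchemy.sql import table, column
#
#     user = table('user', column('id'), column('name'))
#
#     ins = user.insert().values(name='foo')
#     upd = user.update().where(user.c.id == 7).values(name='bar')
#     dlt = user.delete().where(user.c.id == 7)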
class ForUpdateArg(ClauseElement):
@classmethod
def parse_legacy_select(self, arg):
"""Parse the for_update arugment of :func:`.select`.
:param mode: Defines the lockmode to use.
``None`` - translates to no lockmode
``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
``'nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
        ``'read_nowait'`` - translates to ``FOR SHARE NOWAIT``
         (supported by PostgreSQL).
"""
if arg in (None, False):
return None
nowait = read = False
if arg == 'nowait':
nowait = True
elif arg == 'read':
read = True
elif arg == 'read_nowait':
read = nowait = True
elif arg is not True:
raise exc.ArgumentError("Unknown for_update argument: %r" % arg)
return ForUpdateArg(read=read, nowait=nowait)
@property
def legacy_for_update_value(self):
if self.read and not self.nowait:
return "read"
elif self.read and self.nowait:
return "read_nowait"
elif self.nowait:
return "nowait"
else:
return True
def _copy_internals(self, clone=_clone, **kw):
if self.of is not None:
self.of = [clone(col, **kw) for col in self.of]
def __init__(self, nowait=False, read=False, of=None):
"""Represents arguments specified to :meth:`.Select.for_update`.
.. versionadded:: 0.9.0
"""
self.nowait = nowait
self.read = read
if of is not None:
self.of = [_interpret_as_column_or_from(elem)
for elem in util.to_list(of)]
else:
self.of = None
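# Illustrative sketch (not part of the library): how the legacy string values
# accepted by ForUpdateArg.parse_legacy_select() map onto the flags set in
# __init__ above.
#
#     ForUpdateArg.parse_legacy_select(True)           # plain FOR UPDATE
#     ForUpdateArg.parse_legacy_select('nowait')       # nowait=True
#     ForUpdateArg.parse_legacy_select('read')         # read=True
#     ForUpdateArg.parse_legacy_select('read_nowait')  # read=True, nowait=True
#     ForUpdateArg.parse_legacy_select(None)           # returns None; no locking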
class SelectBase(Executable, FromClause):
"""Base class for SELECT statements.
This includes :class:`.Select`, :class:`.CompoundSelect` and
:class:`.TextAsFrom`.
"""
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`ScalarSelect`.
"""
return ScalarSelect(self)
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
.. seealso::
:meth:`~.SelectBase.as_scalar`.
"""
return self.as_scalar().label(name)
def cte(self, name=None, recursive=False):
"""Return a new :class:`.CTE`, or Common Table Expression instance.
Common table expressions are a SQL standard whereby SELECT
statements can draw upon secondary statements specified along
with the primary statement, using a clause called "WITH".
Special semantics regarding UNION can also be employed to
allow "recursive" queries, where a SELECT statement can draw
upon the set of rows that have previously been selected.
SQLAlchemy detects :class:`.CTE` objects, which are treated
similarly to :class:`.Alias` objects, as special elements
to be delivered to the FROM clause of the statement as well
as to a WITH clause at the top of the statement.
.. versionadded:: 0.7.6
:param name: name given to the common table expression. Like
          :meth:`.FromClause.alias`, the name can be left as ``None``
in which case an anonymous symbol will be used at query
compile time.
:param recursive: if ``True``, will render ``WITH RECURSIVE``.
A recursive common table expression is intended to be used in
conjunction with UNION ALL in order to derive rows
from those already selected.
The following examples illustrate two examples from
Postgresql's documentation at
http://www.postgresql.org/docs/8.4/static/queries-with.html.
Example 1, non recursive::
from sqlalchemy import Table, Column, String, Integer, MetaData, \\
select, func
metadata = MetaData()
orders = Table('orders', metadata,
Column('region', String),
Column('amount', Integer),
Column('product', String),
Column('quantity', Integer)
)
regional_sales = select([
orders.c.region,
func.sum(orders.c.amount).label('total_sales')
]).group_by(orders.c.region).cte("regional_sales")
top_regions = select([regional_sales.c.region]).\\
where(
regional_sales.c.total_sales >
select([
func.sum(regional_sales.c.total_sales)/10
])
).cte("top_regions")
statement = select([
orders.c.region,
orders.c.product,
func.sum(orders.c.quantity).label("product_units"),
func.sum(orders.c.amount).label("product_sales")
]).where(orders.c.region.in_(
select([top_regions.c.region])
)).group_by(orders.c.region, orders.c.product)
result = conn.execute(statement).fetchall()
Example 2, WITH RECURSIVE::
from sqlalchemy import Table, Column, String, Integer, MetaData, \\
select, func
metadata = MetaData()
parts = Table('parts', metadata,
Column('part', String),
Column('sub_part', String),
Column('quantity', Integer),
)
included_parts = select([
parts.c.sub_part,
parts.c.part,
parts.c.quantity]).\\
where(parts.c.part=='our part').\\
cte(recursive=True)
incl_alias = included_parts.alias()
parts_alias = parts.alias()
included_parts = included_parts.union_all(
select([
parts_alias.c.part,
parts_alias.c.sub_part,
parts_alias.c.quantity
]).
where(parts_alias.c.part==incl_alias.c.sub_part)
)
statement = select([
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
                ]).\\
select_from(included_parts.join(parts,
included_parts.c.part==parts.c.part)).\\
group_by(included_parts.c.sub_part)
result = conn.execute(statement).fetchall()
.. seealso::
:meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`.
"""
return CTE(self, name=name, recursive=recursive)
@_generative
@util.deprecated('0.6',
message="``autocommit()`` is deprecated. Use "
":meth:`.Executable.execution_options` with the "
"'autocommit' flag.")
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to
True.
"""
self._execution_options = \
self._execution_options.union({'autocommit': True})
def _generate(self):
"""Override the default _generate() method to also clear out
exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@property
def _from_objects(self):
return [self]
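# Illustrative sketch (not part of the library): as_scalar() and label() as
# defined above turn a one-column SELECT into a column expression usable in
# another statement's columns clause; the tables are hypothetical.
#
#     from sqlalchemy.sql import table, column, select, func
#
#     user = table('user', column('id'), column('name'))
#     address = table('address', column('id'), column('user_id'))
#
#     count_subq = select([func.count(address.c.id)]).where(
#         address.c.user_id == user.c.id).as_scalar().label('address_count')
#     stmt = select([user.c.name, count_subq])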
class GenerativeSelect(SelectBase):
"""Base class for SELECT statements where additional elements can be
added.
This serves as the base for :class:`.Select` and :class:`.CompoundSelect`
where elements such as ORDER BY, GROUP BY can be added and column rendering
can be controlled. Compare to :class:`.TextAsFrom`, which, while it
subclasses :class:`.SelectBase` and is also a SELECT construct, represents
a fixed textual string which cannot be altered at this level, only
wrapped as a subquery.
.. versionadded:: 0.9.0 :class:`.GenerativeSelect` was added to
provide functionality specific to :class:`.Select` and :class:`.CompoundSelect`
while allowing :class:`.SelectBase` to be used for other SELECT-like
objects, e.g. :class:`.TextAsFrom`.
"""
_order_by_clause = ClauseList()
_group_by_clause = ClauseList()
_limit = None
_offset = None
_for_update_arg = None
def __init__(self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None):
self.use_labels = use_labels
if for_update is not False:
self._for_update_arg = ForUpdateArg.parse_legacy_select(for_update)
if autocommit is not None:
util.warn_deprecated('autocommit on select() is '
'deprecated. Use .execution_options(a'
'utocommit=True)')
self._execution_options = \
self._execution_options.union(
{'autocommit': autocommit})
if limit is not None:
self._limit = util.asint(limit)
if offset is not None:
self._offset = util.asint(offset)
self._bind = bind
if order_by is not None:
self._order_by_clause = ClauseList(*util.to_list(order_by))
if group_by is not None:
self._group_by_clause = ClauseList(*util.to_list(group_by))
@property
def for_update(self):
"""Provide legacy dialect support for the ``for_update`` attribute.
"""
if self._for_update_arg is not None:
return self._for_update_arg.legacy_for_update_value
else:
return None
@for_update.setter
def for_update(self, value):
self._for_update_arg = ForUpdateArg.parse_legacy_select(value)
@_generative
def with_for_update(self, nowait=False, read=False, of=None):
"""Specify a ``FOR UPDATE`` clause for this :class:`.GenerativeSelect`.
E.g.::
stmt = select([table]).with_for_update(nowait=True)
On a database like Postgresql or Oracle, the above would render a
statement like::
SELECT table.a, table.b FROM table FOR UPDATE NOWAIT
on other backends, the ``nowait`` option is ignored and instead
would produce::
SELECT table.a, table.b FROM table FOR UPDATE
When called with no arguments, the statement will render with
the suffix ``FOR UPDATE``. Additional arguments can then be
provided which allow for common database-specific
variants.
:param nowait: boolean; will render ``FOR UPDATE NOWAIT`` on Oracle and
Postgresql dialects.
:param read: boolean; will render ``LOCK IN SHARE MODE`` on MySQL,
``FOR SHARE`` on Postgresql. On Postgresql, when combined with
``nowait``, will render ``FOR SHARE NOWAIT``.
:param of: SQL expression or list of SQL expression elements
(typically :class:`.Column` objects or a compatible expression) which
will render into a ``FOR UPDATE OF`` clause; supported by PostgreSQL
and Oracle. May render as a table or as a column depending on
backend.
.. versionadded:: 0.9.0
"""
self._for_update_arg = ForUpdateArg(nowait=nowait, read=read, of=of)
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion
applied."""
self._limit = util.asint(limit)
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion
applied."""
self._offset = util.asint(offset)
@_generative
def order_by(self, *clauses):
"""return a new selectable with the given list of ORDER BY
criterion applied.
The criterion will be appended to any pre-existing ORDER BY
criterion.
"""
self.append_order_by(*clauses)
@_generative
def group_by(self, *clauses):
"""return a new selectable with the given list of GROUP BY
criterion applied.
The criterion will be appended to any pre-existing GROUP BY
criterion.
"""
self.append_group_by(*clauses)
def append_order_by(self, *clauses):
"""Append the given ORDER BY criterion applied to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
This is an **in-place** mutation method; the
:meth:`~.GenerativeSelect.order_by` method is preferred, as it provides standard
:term:`method chaining`.
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, '_order_by_clause', None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(*clauses)
def append_group_by(self, *clauses):
"""Append the given GROUP BY criterion applied to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
This is an **in-place** mutation method; the
:meth:`~.GenerativeSelect.group_by` method is preferred, as it provides standard
:term:`method chaining`.
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, '_group_by_clause', None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(*clauses)
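# Illustrative sketch (not part of the library): the generative methods above
# each return a modified copy, so ordering, limiting and offsetting can be
# chained; the table and column names are hypothetical.
#
#     from sqlalchemy.sql import table, column, select
#
#     user = table('user', column('id'), column('name'))
#
#     stmt = select([user]).order_by(user.c.name).limit(10).offset(20)
#     # roughly: SELECT user.id, user.name FROM user
#     #          ORDER BY user.name LIMIT :param_1 OFFSET :param_2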
class CompoundSelect(GenerativeSelect):
"""Forms the basis of ``UNION``, ``UNION ALL``, and other
SELECT-based set operations.
.. seealso::
:func:`.union`
:func:`.union_all`
:func:`.intersect`
:func:`.intersect_all`
        :func:`.except_`
:func:`.except_all`
"""
__visit_name__ = 'compound_select'
UNION = util.symbol('UNION')
UNION_ALL = util.symbol('UNION ALL')
EXCEPT = util.symbol('EXCEPT')
EXCEPT_ALL = util.symbol('EXCEPT ALL')
INTERSECT = util.symbol('INTERSECT')
INTERSECT_ALL = util.symbol('INTERSECT ALL')
_is_from_container = True
def __init__(self, keyword, *selects, **kwargs):
self._auto_correlate = kwargs.pop('correlate', False)
self.keyword = keyword
self.selects = []
numcols = None
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for n, s in enumerate(selects):
s = _clause_element_as_expr(s)
if not numcols:
numcols = len(s.c._all_columns)
elif len(s.c._all_columns) != numcols:
raise exc.ArgumentError('All selectables passed to '
'CompoundSelect must have identical numbers of '
'columns; select #%d has %d columns, select '
'#%d has %d' % (1, len(self.selects[0].c._all_columns), n
+ 1, len(s.c._all_columns)))
self.selects.append(s.self_group(self))
GenerativeSelect.__init__(self, **kwargs)
@classmethod
def _create_union(cls, *selects, **kwargs):
"""Return a ``UNION`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
A similar :func:`union()` method is available on all
:class:`.FromClause` subclasses.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)
@classmethod
def _create_union_all(cls, *selects, **kwargs):
"""Return a ``UNION ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
A similar :func:`union_all()` method is available on all
:class:`.FromClause` subclasses.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
@classmethod
def _create_except(cls, *selects, **kwargs):
"""Return an ``EXCEPT`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)
@classmethod
def _create_except_all(cls, *selects, **kwargs):
"""Return an ``EXCEPT ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)
@classmethod
def _create_intersect(cls, *selects, **kwargs):
"""Return an ``INTERSECT`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)
@classmethod
def _create_intersect_all(cls, *selects, **kwargs):
"""Return an ``INTERSECT ALL`` of multiple selectables.
The returned object is an instance of
:class:`.CompoundSelect`.
\*selects
a list of :class:`.Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs)
def _scalar_type(self):
return self.selects[0]._scalar_type()
def self_group(self, against=None):
return FromGrouping(self)
def is_derived_from(self, fromclause):
for s in self.selects:
if s.is_derived_from(fromclause):
return True
return False
def _populate_column_collection(self):
for cols in zip(*[s.c._all_columns for s in self.selects]):
# this is a slightly hacky thing - the union exports a
# column that resembles just that of the *first* selectable.
# to get at a "composite" column, particularly foreign keys,
# you have to dig through the proxies collection which we
# generate below. We may want to improve upon this, such as
# perhaps _make_proxy can accept a list of other columns
# that are "shared" - schema.column can then copy all the
# ForeignKeys in. this would allow the union() to have all
# those fks too.
proxy = cols[0]._make_proxy(self,
name=cols[0]._label if self.use_labels else None,
key=cols[0]._key_label if self.use_labels else None)
# hand-construct the "_proxies" collection to include all
# derived columns place a 'weight' annotation corresponding
# to how low in the list of select()s the column occurs, so
# that the corresponding_column() operation can resolve
# conflicts
proxy._proxies = [c._annotate({'weight': i + 1}) for (i,
c) in enumerate(cols)]
def _refresh_for_new_column(self, column):
for s in self.selects:
s._refresh_for_new_column(column)
if not self._cols_populated:
return None
raise NotImplementedError("CompoundSelect constructs don't support "
"addition of columns to underlying selectables")
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.selects = [clone(s, **kw) for s in self.selects]
if hasattr(self, '_col_map'):
del self._col_map
for attr in ('_order_by_clause', '_group_by_clause', '_for_update_arg'):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.c) or []) \
+ [self._order_by_clause, self._group_by_clause] \
+ list(self.selects)
def bind(self):
if self._bind:
return self._bind
for s in self.selects:
e = s.bind
if e:
return e
else:
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
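# Illustrative sketch (not part of the library): the _create_union()
# constructor above is normally reached through the public union() function;
# the tables are hypothetical.
#
#     from sqlalchemy.sql import table, column, select, union
#
#     t1 = table('t1', column('x'))
#     t2 = table('t2', column('x'))
#
#     u = union(select([t1.c.x]), select([t2.c.x]))
#     # SELECT t1.x FROM t1 UNION SELECT t2.x FROM t2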
class HasPrefixes(object):
_prefixes = ()
@_generative
def prefix_with(self, *expr, **kw):
"""Add one or more expressions following the statement keyword, i.e.
SELECT, INSERT, UPDATE, or DELETE. Generative.
This is used to support backend-specific prefix keywords such as those
provided by MySQL.
E.g.::
stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")
Multiple prefixes can be specified by multiple calls
to :meth:`.prefix_with`.
:param \*expr: textual or :class:`.ClauseElement` construct which
will be rendered following the INSERT, UPDATE, or DELETE
keyword.
:param \**kw: A single keyword 'dialect' is accepted. This is an
optional string dialect name which will
limit rendering of this prefix to only that dialect.
"""
dialect = kw.pop('dialect', None)
if kw:
raise exc.ArgumentError("Unsupported argument(s): %s" %
",".join(kw))
self._setup_prefixes(expr, dialect)
def _setup_prefixes(self, prefixes, dialect=None):
self._prefixes = self._prefixes + tuple(
[(_literal_as_text(p), dialect) for p in prefixes])
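# Illustrative sketch (not part of the library): prefix_with() as defined
# above, restricted to a single dialect; the table and the MySQL keyword are
# chosen for illustration only.
#
#     from sqlalchemy.sql import table, column, select
#
#     user = table('user', column('id'))
#
#     stmt = select([user.c.id]).prefix_with('SQL_NO_CACHE', dialect='mysql')
#     # renders "SELECT SQL_NO_CACHE user.id FROM user" on MySQL;
#     # other dialects omit the prefix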
class Select(HasPrefixes, GenerativeSelect):
"""Represents a ``SELECT`` statement.
"""
__visit_name__ = 'select'
_prefixes = ()
_hints = util.immutabledict()
_distinct = False
_from_cloned = None
_correlate = ()
_correlate_except = None
_memoized_property = SelectBase._memoized_property
_is_select = True
def __init__(self,
columns=None,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
**kwargs):
"""Construct a new :class:`.Select`.
Similar functionality is also available via the :meth:`.FromClause.select`
method on any :class:`.FromClause`.
All arguments which accept :class:`.ClauseElement` arguments also accept
string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
.. seealso::
:ref:`coretutorial_selecting` - Core Tutorial description of
:func:`.select`.
:param columns:
A list of :class:`.ClauseElement` objects, typically
:class:`.ColumnElement` objects or subclasses, which will form the
columns clause of the resulting statement. For all members which are
instances of :class:`.Selectable`, the individual :class:`.ColumnElement`
members of the :class:`.Selectable` will be added individually to the
columns clause. For example, specifying a
:class:`~sqlalchemy.schema.Table` instance will result in all the
contained :class:`~sqlalchemy.schema.Column` objects within to be added
to the columns clause.
This argument is not present on the form of :func:`select()`
available on :class:`~sqlalchemy.schema.Table`.
:param whereclause:
A :class:`.ClauseElement` expression which will be used to form the
``WHERE`` clause.
:param from_obj:
A list of :class:`.ClauseElement` objects which will be added to the
``FROM`` clause of the resulting statement. Note that "from" objects are
automatically located within the columns and whereclause ClauseElements.
Use this parameter to explicitly specify "from" objects which are not
automatically locatable. This could include
:class:`~sqlalchemy.schema.Table` objects that aren't otherwise present,
          or :class:`.Join` objects whose presence will supersede that of the
:class:`~sqlalchemy.schema.Table` objects already located in the other
clauses.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind=None:
an :class:`~.Engine` or :class:`~.Connection` instance
to which the
resulting :class:`.Select` object will be bound. The :class:`.Select`
object will otherwise automatically bind to whatever
:class:`~.base.Connectable` instances can be located within its contained
:class:`.ClauseElement` members.
:param correlate=True:
indicates that this :class:`.Select` object should have its
contained :class:`.FromClause` elements "correlated" to an enclosing
:class:`.Select` object. This means that any :class:`.ClauseElement`
instance within the "froms" collection of this :class:`.Select`
which is also present in the "froms" collection of an
enclosing select will not be rendered in the ``FROM`` clause
of this select statement.
:param distinct=False:
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
The boolean argument may also be a column expression or list
of column expressions - this is a special calling form which
is understood by the Postgresql dialect to render the
``DISTINCT ON (<columns>)`` syntax.
``distinct`` is also available via the :meth:`~.Select.distinct`
generative method.
:param for_update=False:
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement.
.. deprecated:: 0.9.0 - use :meth:`.GenerativeSelect.with_for_update`
to specify the structure of the ``FOR UPDATE`` clause.
``for_update`` accepts various string values interpreted by
specific backends, including:
* ``"read"`` - on MySQL, translates to ``LOCK IN SHARE MODE``;
on Postgresql, translates to ``FOR SHARE``.
* ``"nowait"`` - on Postgresql and Oracle, translates to
``FOR UPDATE NOWAIT``.
* ``"read_nowait"`` - on Postgresql, translates to
``FOR SHARE NOWAIT``.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
:param group_by:
a list of :class:`.ClauseElement` objects which will comprise the
``GROUP BY`` clause of the resulting select.
:param having:
a :class:`.ClauseElement` that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used.
:param limit=None:
a numerical value which usually compiles to a ``LIMIT``
expression in the resulting select. Databases that don't
support ``LIMIT`` will attempt to provide similar
functionality.
:param offset=None:
a numeric value which usually compiles to an ``OFFSET``
expression in the resulting select. Databases that don't
support ``OFFSET`` will attempt to provide similar
functionality.
:param order_by:
a scalar or list of :class:`.ClauseElement` objects which will
comprise the ``ORDER BY`` clause of the resulting select.
:param use_labels=False:
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
          column with its parent table's (or alias's) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`.Select` object will use these
names as well for targeting column members.
use_labels is also available via the :meth:`~.GenerativeSelect.apply_labels`
generative method.
"""
self._auto_correlate = correlate
if distinct is not False:
if distinct is True:
self._distinct = True
else:
self._distinct = [
_literal_as_text(e)
for e in util.to_list(distinct)
]
if from_obj is not None:
self._from_obj = util.OrderedSet(
_interpret_as_from(f)
for f in util.to_list(from_obj))
else:
self._from_obj = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError:
raise exc.ArgumentError("columns argument to select() must "
"be a Python list or other iterable")
if cols_present:
self._raw_columns = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
if having is not None:
self._having = _literal_as_text(having)
else:
self._having = None
if prefixes:
self._setup_prefixes(prefixes)
GenerativeSelect.__init__(self, **kwargs)
@property
def _froms(self):
# would love to cache this,
# but there's just enough edge cases, particularly now that
# declarative encourages construction of SQL expressions
# without tables present, to just regen this each time.
froms = []
seen = set()
translate = self._from_cloned
def add(items):
for item in items:
if item is self:
raise exc.InvalidRequestError(
"select() construct refers to itself as a FROM")
if translate and item in translate:
item = translate[item]
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
add(_from_objects(*self._raw_columns))
if self._whereclause is not None:
add(_from_objects(self._whereclause))
add(self._from_obj)
return froms
def _get_display_froms(self, explicit_correlate_froms=None,
implicit_correlate_froms=None):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
rendered in the FROM clause of enclosing selects; this Select
may want to leave those absent if it is automatically
correlating.
"""
froms = self._froms
toremove = set(itertools.chain(*[
_expand_cloned(f._hide_froms)
for f in froms]))
if toremove:
# if we're maintaining clones of froms,
# add the copies out to the toremove list. only include
# clones that are lexical equivalents.
if self._from_cloned:
toremove.update(
self._from_cloned[f] for f in
toremove.intersection(self._from_cloned)
if self._from_cloned[f]._is_lexical_equivalent(f)
)
# filter out to FROM clauses not in the list,
# using a list to maintain ordering
froms = [f for f in froms if f not in toremove]
if self._correlate:
to_correlate = self._correlate
if to_correlate:
froms = [
f for f in froms if f not in
_cloned_intersection(
_cloned_intersection(froms, explicit_correlate_froms or ()),
to_correlate
)
]
if self._correlate_except is not None:
froms = [
f for f in froms if f not in
_cloned_difference(
_cloned_intersection(froms, explicit_correlate_froms or ()),
self._correlate_except
)
]
if self._auto_correlate and \
implicit_correlate_froms and \
len(froms) > 1:
froms = [
f for f in froms if f not in
_cloned_intersection(froms, implicit_correlate_froms)
]
if not len(froms):
raise exc.InvalidRequestError("Select statement '%s"
"' returned no FROM clauses due to "
"auto-correlation; specify "
"correlate(<tables>) to control "
"correlation manually." % self)
return froms
def _scalar_type(self):
elem = self._raw_columns[0]
cols = list(elem._select_iterable)
return cols[0].type
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing hint for the given selectable to this
:class:`.Select`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`.Table` or :class:`.Alias` passed as the
``selectable`` argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\
with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
"""
self._hints = self._hints.union(
{(selectable, dialect_name): text})
@property
def type(self):
raise exc.InvalidRequestError("Select objects don't have a type. "
"Call as_scalar() on this Select object "
"to return a 'scalar' version of this Select.")
@_memoized_property.method
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property,
which is specifically for those FromClause elements that would
actually be rendered.
"""
froms = self._froms
return froms + list(_from_objects(*froms))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
def _copy_internals(self, clone=_clone, **kw):
# Select() object has been cloned and probably adapted by the
# given clone function. Apply the cloning function to internal
# objects
# 1. keep a dictionary of the froms we've cloned, and what
# they've become. This is consulted later when we derive
# additional froms from "whereclause" and the columns clause,
# which may still reference the uncloned parent table.
# as of 0.7.4 we also put the current version of _froms, which
# gets cleared on each generation. previously we were "baking"
# _froms into self._from_obj.
self._from_cloned = from_cloned = dict((f, clone(f, **kw))
for f in self._from_obj.union(self._froms))
        # 2. update persistent _from_obj with the cloned versions.
self._from_obj = util.OrderedSet(from_cloned[f] for f in
self._from_obj)
# the _correlate collection is done separately, what can happen
# here is the same item is _correlate as in _from_obj but the
# _correlate version has an annotation on it - (specifically
# RelationshipProperty.Comparator._criterion_exists() does
        # this). Also keep _correlate liberally open with its previous
# contents, as this set is used for matching, not rendering.
self._correlate = set(clone(f) for f in
self._correlate).union(self._correlate)
        # 3. clone other things.  The difficulty here is that Column
# objects are not actually cloned, and refer to their original
# .table, resulting in the wrong "from" parent after a clone
        # operation.  Hence _from_cloned and _from_obj supersede what is
# present here.
self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
for attr in '_whereclause', '_having', '_order_by_clause', \
'_group_by_clause', '_for_update_arg':
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr), **kw))
# erase exported column list, _froms collection,
# etc.
self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
"""
self.append_column(column)
@util.dependencies("sqlalchemy.sql.util")
def reduce_columns(self, sqlutil, only_synonyms=True):
"""Return a new :func`.select` construct with redundantly
named, equivalently-valued columns removed from the columns clause.
"Redundant" here means two columns where one refers to the
other either based on foreign key, or via a simple equality
comparison in the WHERE clause of the statement. The primary purpose
of this method is to automatically construct a select statement
with all uniquely-named columns, without the need to use
table-qualified labels as :meth:`.apply_labels` does.
When columns are omitted based on foreign key, the referred-to
column is the one that's kept. When columns are omitted based on
        WHERE equivalence, the first column in the columns clause is the
one that's kept.
:param only_synonyms: when True, limit the removal of columns
to those which have the same name as the equivalent. Otherwise,
all columns that are equivalent to another are removed.
.. versionadded:: 0.8
"""
return self.with_only_columns(
sqlutil.reduce_columns(
self.inner_columns,
only_synonyms=only_synonyms,
*(self._whereclause, ) + tuple(self._from_obj)
)
)
@_generative
def with_only_columns(self, columns):
"""Return a new :func:`.select` construct with its columns
clause replaced with the given columns.
.. versionchanged:: 0.7.3
Due to a bug fix, this method has a slight
behavioral change as of version 0.7.3.
Prior to version 0.7.3, the FROM clause of
a :func:`.select` was calculated upfront and as new columns
were added; in 0.7.3 and later it's calculated
at compile time, fixing an issue regarding late binding
of columns to parent tables. This changes the behavior of
:meth:`.Select.with_only_columns` in that FROM clauses no
longer represented in the new list are dropped,
but this behavior is more consistent in
that the FROM clauses are consistently derived from the
current columns clause. The original intent of this method
is to allow trimming of the existing columns list to be fewer
columns than originally present; the use case of replacing
the columns list with an entirely different one hadn't
been anticipated until 0.7.3 was released; the usage
guidelines below illustrate how this should be done.
This method is exactly equivalent to as if the original
:func:`.select` had been called with the given columns
clause. I.e. a statement::
s = select([table1.c.a, table1.c.b])
s = s.with_only_columns([table1.c.b])
should be exactly equivalent to::
s = select([table1.c.b])
This means that FROM clauses which are only derived
from the column list will be discarded if the new column
list no longer contains that FROM::
>>> table1 = table('t1', column('a'), column('b'))
>>> table2 = table('t2', column('a'), column('b'))
>>> s1 = select([table1.c.a, table2.c.b])
>>> print s1
SELECT t1.a, t2.b FROM t1, t2
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1
The preferred way to maintain a specific FROM clause
in the construct, assuming it won't be represented anywhere
else (i.e. not in the WHERE clause, etc.) is to set it using
:meth:`.Select.select_from`::
>>> s1 = select([table1.c.a, table2.c.b]).\\
... select_from(table1.join(table2,
... table1.c.a==table2.c.a))
>>> s2 = s1.with_only_columns([table2.c.b])
>>> print s2
SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a
Care should also be taken to use the correct
set of column objects passed to :meth:`.Select.with_only_columns`.
Since the method is essentially equivalent to calling the
:func:`.select` construct in the first place with the given
columns, the columns passed to :meth:`.Select.with_only_columns`
should usually be a subset of those which were passed
to the :func:`.select` construct, not those which are available
from the ``.c`` collection of that :func:`.select`. That
is::
s = select([table1.c.a, table1.c.b]).select_from(table1)
s = s.with_only_columns([table1.c.b])
and **not**::
# usually incorrect
s = s.with_only_columns([s.c.b])
The latter would produce the SQL::
SELECT b
FROM (SELECT t1.a AS a, t1.b AS b
FROM t1), t1
Since the :func:`.select` construct is essentially being
asked to select both from ``table1`` as well as itself.
"""
self._reset_exported()
rc = []
for c in columns:
c = _interpret_as_column_or_from(c)
if isinstance(c, ScalarSelect):
c = c.self_group(against=operators.comma_op)
rc.append(c)
self._raw_columns = rc
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to
its HAVING clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self, *expr):
"""Return a new select() construct which will apply DISTINCT to its
columns clause.
:param \*expr: optional column expressions. When present,
           the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
"""
if expr:
expr = [_literal_as_text(e) for e in expr]
if isinstance(self._distinct, list):
self._distinct = self._distinct + expr
else:
self._distinct = expr
else:
self._distinct = True
@_generative
def select_from(self, fromclause):
"""return a new :func:`.select` construct with the
given FROM expression
merged into its list of FROM objects.
E.g.::
table1 = table('t1', column('a'))
table2 = table('t2', column('b'))
s = select([table1.c.a]).\\
select_from(
table1.join(table2, table1.c.a==table2.c.b)
)
The "from" list is a unique set on the identity of each element,
so adding an already present :class:`.Table` or other selectable
will have no effect. Passing a :class:`.Join` that refers
to an already present :class:`.Table` or other selectable will have
the effect of concealing the presence of that selectable as
an individual element in the rendered FROM list, instead
rendering it into a JOIN clause.
While the typical purpose of :meth:`.Select.select_from` is to
replace the default, derived FROM clause with a join, it can
also be called with individual table elements, multiple times
if desired, in the case that the FROM clause cannot be fully
derived from the columns clause::
select([func.count('*')]).select_from(table1)
"""
self.append_from(fromclause)
@_generative
def correlate(self, *fromclauses):
"""return a new :class:`.Select` which will correlate the given FROM
clauses to that of an enclosing :class:`.Select`.
Calling this method turns off the :class:`.Select` object's
default behavior of "auto-correlation". Normally, FROM elements
which appear in a :class:`.Select` that encloses this one via
its :term:`WHERE clause`, ORDER BY, HAVING or
:term:`columns clause` will be omitted from this :class:`.Select`
object's :term:`FROM clause`.
Setting an explicit correlation collection using the
:meth:`.Select.correlate` method provides a fixed list of FROM objects
that can potentially take place in this process.
When :meth:`.Select.correlate` is used to apply specific FROM clauses
for correlation, the FROM elements become candidates for
correlation regardless of how deeply nested this :class:`.Select`
object is, relative to an enclosing :class:`.Select` which refers to
the same FROM object. This is in contrast to the behavior of
"auto-correlation" which only correlates to an immediate enclosing
:class:`.Select`. Multi-level correlation ensures that the link
between enclosed and enclosing :class:`.Select` is always via
at least one WHERE/ORDER BY/HAVING/columns clause in order for
correlation to take place.
If ``None`` is passed, the :class:`.Select` object will correlate
none of its FROM entries, and all will render unconditionally
in the local FROM clause.
:param \*fromclauses: a list of one or more :class:`.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate collection.
.. versionchanged:: 0.8.0 ORM-mapped classes are accepted by
:meth:`.Select.correlate`.
.. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no
longer unconditionally removes entries from the FROM clause; instead,
the candidate FROM entries must also be matched by a FROM entry
located in an enclosing :class:`.Select`, which ultimately encloses
this one as present in the WHERE clause, ORDER BY clause, HAVING
clause, or columns clause of an enclosing :meth:`.Select`.
.. versionchanged:: 0.8.2 explicit correlation takes place
via any level of nesting of :class:`.Select` objects; in previous
0.8 versions, correlation would only occur relative to the immediate
enclosing :class:`.Select` construct.
.. seealso::
:meth:`.Select.correlate_except`
:ref:`correlated_subqueries`
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate = ()
else:
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclauses)
@_generative
def correlate_except(self, *fromclauses):
"""return a new :class:`.Select` which will omit the given FROM
clauses from the auto-correlation process.
Calling :meth:`.Select.correlate_except` turns off the
:class:`.Select` object's default behavior of
"auto-correlation" for the given FROM elements. An element
specified here will unconditionally appear in the FROM list, while
all other FROM elements remain subject to normal auto-correlation
behaviors.
.. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except`
method was improved to fully prevent FROM clauses specified here
from being omitted from the immediate FROM clause of this
:class:`.Select`.
If ``None`` is passed, the :class:`.Select` object will correlate
all of its FROM entries.
.. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will
correctly auto-correlate all FROM clauses.
:param \*fromclauses: a list of one or more :class:`.FromClause`
constructs, or other compatible constructs (i.e. ORM-mapped
classes) to become part of the correlate-exception collection.
.. seealso::
:meth:`.Select.correlate`
:ref:`correlated_subqueries`
"""
self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate_except = ()
else:
self._correlate_except = set(self._correlate_except or ()).union(
_interpret_as_from(f) for f in fromclauses)
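    # Illustrative sketch (not part of the library): explicit correlation as
    # described in correlate() above; the tables are hypothetical.
    #
    #     from sqlalchemy.sql import table, column, select, exists
    #
    #     user = table('user', column('id'))
    #     address = table('address', column('id'), column('user_id'))
    #
    #     subq = select([address.c.id]).where(
    #         address.c.user_id == user.c.id).correlate(user)
    #     stmt = select([user.c.id]).where(exists(subq))
    #     # the inner SELECT omits "user" from its FROM list and correlates
    #     # to the enclosing statement instead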
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.correlate` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._auto_correlate = False
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclause)
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
This is an **in-place** mutation method; the
:meth:`~.Select.column` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
column = _interpret_as_column_or_from(column)
if isinstance(column, ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
This is an **in-place** mutation method; the
:meth:`~.Select.prefix_with` method is preferred, as it provides standard
:term:`method chaining`.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.where` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._whereclause = and_(True_._ifnone(self._whereclause), whereclause)
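    # Illustrative sketch (not part of the original source; ``table`` is a
    # hypothetical Table object): the in-place call
    #   s.append_whereclause(table.c.x == 5)
    # has the same effect as the generative form s = s.where(table.c.x == 5).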
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
This is an **in-place** mutation method; the
:meth:`~.Select.having` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
self._having = and_(True_._ifnone(self._having), having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
This is an **in-place** mutation method; the
:meth:`~.Select.select_from` method is preferred, as it provides standard
:term:`method chaining`.
"""
self._reset_exported()
fromclause = _interpret_as_from(fromclause)
self._from_obj = self._from_obj.union([fromclause])
@_memoized_property
def _columns_plus_names(self):
if self.use_labels:
names = set()
def name_for_col(c):
if c._label is None:
return (None, c)
name = c._label
if name in names:
name = c.anon_label
else:
names.add(name)
return name, c
return [
name_for_col(c)
for c in util.unique_list(_select_iterables(self._raw_columns))
]
else:
return [
(None, c)
for c in util.unique_list(_select_iterables(self._raw_columns))
]
def _populate_column_collection(self):
for name, c in self._columns_plus_names:
if not hasattr(c, '_make_proxy'):
continue
if name is None:
key = None
elif self.use_labels:
key = c._key_label
if key is not None and key in self.c:
key = c.anon_label
else:
key = None
c._make_proxy(self, key=key,
name=name,
name_is_truncatable=True)
def _refresh_for_new_column(self, column):
for fromclause in self._froms:
col = fromclause._refresh_for_new_column(column)
if col is not None:
if col in self.inner_columns and self._cols_populated:
our_label = col._key_label if self.use_labels else col.key
if our_label not in self.c:
return col._make_proxy(self,
name=col._label if self.use_labels else None,
key=col._key_label if self.use_labels else None,
name_is_truncatable=True)
return None
return None
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if isinstance(against, CompoundSelect):
return self
return FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return CompoundSelect._create_union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return CompoundSelect._create_union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return CompoundSelect._create_except(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return CompoundSelect._create_intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return CompoundSelect._create_intersect_all(self, other, **kwargs)
def bind(self):
if self._bind:
return self._bind
froms = self._froms
if not froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class ScalarSelect(Generative, Grouping):
_from_objects = []
def __init__(self, element):
self.element = element
self.type = element._scalar_type()
@property
def columns(self):
raise exc.InvalidRequestError('Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.')
c = columns
@_generative
def where(self, crit):
"""Apply a WHERE clause to the SELECT statement referred to
by this :class:`.ScalarSelect`.
"""
self.element = self.element.where(crit)
def self_group(self, **kwargs):
return self
class Exists(UnaryExpression):
"""Represent an ``EXISTS`` clause.
"""
__visit_name__ = UnaryExpression.__visit_name__
_from_objects = []
def __init__(self, *args, **kwargs):
"""Construct a new :class:`.Exists` against an existing
:class:`.Select` object.
Calling styles are of the following forms::
# use on an existing select()
s = select([table.c.col1]).where(table.c.col2==5)
s = exists(s)
# construct a select() at once
exists(['*'], **select_arguments).where(criterion)
# columns argument is optional, generates "EXISTS (SELECT *)"
# by default.
exists().where(table.c.col2==5)
"""
if args and isinstance(args[0], (SelectBase, ScalarSelect)):
s = args[0]
else:
if not args:
args = ([literal_column('*')],)
s = Select(*args, **kwargs).as_scalar().self_group()
UnaryExpression.__init__(self, s, operator=operators.exists,
type_=type_api.BOOLEANTYPE)
def select(self, whereclause=None, **params):
return Select([self], whereclause, **params)
def correlate(self, *fromclause):
e = self._clone()
e.element = self.element.correlate(*fromclause).self_group()
return e
def correlate_except(self, *fromclause):
e = self._clone()
e.element = self.element.correlate_except(*fromclause).self_group()
return e
def select_from(self, clause):
"""return a new :class:`.Exists` construct, applying the given
expression to the :meth:`.Select.select_from` method of the select
statement contained.
"""
e = self._clone()
e.element = self.element.select_from(clause).self_group()
return e
def where(self, clause):
"""return a new exists() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
e = self._clone()
e.element = self.element.where(clause).self_group()
return e
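# Illustrative sketch (not part of the original source; ``table`` is a
# hypothetical Table object): an Exists used as the WHERE criterion of an
# enclosing SELECT, following the calling styles shown in the docstring above:
#
#   stmt = select([table.c.col1]).where(exists().where(table.c.col2 == 5))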
class TextAsFrom(SelectBase):
"""Wrap a :class:`.TextClause` construct within a :class:`.SelectBase`
interface.
This allows the :class:`.TextClause` object to gain a ``.c`` collection and
other FROM-like capabilities such as :meth:`.FromClause.alias`,
:meth:`.SelectBase.cte`, etc.
The :class:`.TextAsFrom` construct is produced via the
:meth:`.TextClause.columns` method - see that method for details.
.. versionadded:: 0.9.0
.. seealso::
:func:`.text`
:meth:`.TextClause.columns`
"""
__visit_name__ = "text_as_from"
_textual = True
def __init__(self, text, columns):
self.element = text
self.column_args = columns
@property
def _bind(self):
return self.element._bind
@_generative
def bindparams(self, *binds, **bind_as_values):
self.element = self.element.bindparams(*binds, **bind_as_values)
def _populate_column_collection(self):
for c in self.column_args:
c._make_proxy(self)
def _copy_internals(self, clone=_clone, **kw):
self._reset_exported()
self.element = clone(self.element, **kw)
def _scalar_type(self):
return self.column_args[0].type
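# Illustrative sketch (not part of the original source; the table and column
# names are hypothetical): a TextAsFrom is normally obtained from
# TextClause.columns(), after which it can be used like a FROM element:
#
#   t = text("SELECT id, name FROM users").columns(column('id'), column('name'))
#   stmt = select([t.c.id]).select_from(t)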
class AnnotatedFromClause(Annotated):
def __init__(self, element, values):
# force FromClause to generate their internal
# collections into __dict__
element.c
Annotated.__init__(self, element, values)
| Cito/sqlalchemy | lib/sqlalchemy/sql/selectable.py | Python | mit | 108,071 |
from unittest.mock import Mock
import pytest
from mitmproxy import connection
from mitmproxy.proxy import events, commands
@pytest.fixture
def tconn() -> connection.Server:
return connection.Server(None)
def test_dataclasses(tconn):
assert repr(events.Start())
assert repr(events.DataReceived(tconn, b"foo"))
assert repr(events.ConnectionClosed(tconn))
def test_command_completed():
with pytest.raises(TypeError):
events.CommandCompleted()
assert repr(events.HookCompleted(Mock(), None))
class FooCommand(commands.Command):
pass
with pytest.warns(RuntimeWarning, match="properly annotated"):
class FooCompleted(events.CommandCompleted):
pass
class FooCompleted1(events.CommandCompleted):
command: FooCommand
with pytest.warns(RuntimeWarning, match="conflicting subclasses"):
class FooCompleted2(events.CommandCompleted):
command: FooCommand
| mitmproxy/mitmproxy | test/mitmproxy/proxy/test_events.py | Python | mit | 957 |
#! /usr/bin/env python
'''Make sure the Riak client is sane'''
import unittest
from test import BaseTest
from simhash_db import Client
class RiakTest(BaseTest, unittest.TestCase):
'''Test the Riak client'''
def make_client(self, name, num_blocks, num_bits):
return Client('riak', name, num_blocks, num_bits)
if __name__ == '__main__':
unittest.main()
| seomoz/simhash-db-py | test/test_riak.py | Python | mit | 377 |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
#import mshr
from dolfin import *
import sympy as sy
import numpy as np
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
from dolfin import __version__
import MaxwellPrecond as MP
import StokesPrecond
import time
def myCCode(A):
return sy.ccode(A).replace('M_PI','pi')
def Domain(n):
# mesh = RectangleMesh(0., -1., 2., 1., n, n)
# mesh = RectangleMesh(0., 0., 1.0, 1.0, n, n)
mesh = UnitSquareMesh(n, n)
class Left(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 0.0)
class Right(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 1.0)
class Bottom(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], 0.0)
class Top(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], 1.0)
# mesh = RectangleMesh(Point(0., -1.), Point(1*10., 1.), 1*5*n, n)
# class Left(SubDomain):
# def inside(self, x, on_boundary):
# return near(x[0], 0.0)
# class Right(SubDomain):
# def inside(self, x, on_boundary):
# return near(x[0], 1*10.0)
# class Bottom(SubDomain):
# def inside(self, x, on_boundary):
# return near(x[1], -1.)
# class Top(SubDomain):
# def inside(self, x, on_boundary):
# return near(x[1], 1.)
left = Left()
top = Top()
right = Right()
bottom = Bottom()
# Initialize mesh function for the domain
domains = CellFunction("size_t", mesh)
domains.set_all(0)
# Initialize mesh function for boundary domains
boundaries = FacetFunction("size_t", mesh)
boundaries.set_all(0)
right.mark(boundaries, 2)
left.mark(boundaries, 2)
top.mark(boundaries, 1)
bottom.mark(boundaries, 1)
return mesh, boundaries, domains
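# Example usage (hypothetical, not part of the original source): build a
# 16x16 unit-square mesh together with its boundary and domain markers:
#   mesh, boundaries, domains = Domain(16)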
def ExactSolution(mesh, params):
Re = 1./params[2]
Ha = sqrt(params[0]/(params[1]*params[2]))
G = 10.
x = sy.Symbol('x[0]')
y = sy.Symbol('x[1]')
b = (G/params[0])*(sy.sinh(y*Ha)/sy.sinh(Ha)-y)
d = sy.diff(x,x)
p = -G*x - (G**2)/(2*params[0])*(sy.sinh(y*Ha)/sy.sinh(Ha)-y)**2
u = (G/(params[2]*Ha*sy.tanh(Ha)))*(1-sy.cosh(y*Ha)/sy.cosh(Ha))
v = sy.diff(x, y)
r = sy.diff(x, y)
uu = y*x*sy.exp(x+y)
u = sy.diff(uu, y)
v = -sy.diff(uu, x)
p = sy.sin(x)*sy.exp(y)
bb = x*y*sy.cos(x)
b = sy.diff(bb, y)
d = -sy.diff(bb, x)
r = x*sy.sin(2*sy.pi*y)*sy.sin(2*sy.pi*x)
# b = y
# d = sy.diff(x, y)
# r = sy.diff(y, y)
J11 = p - params[2]*sy.diff(u, x)
J12 = - params[2]*sy.diff(u, y)
J21 = - params[2]*sy.diff(v, x)
J22 = p - params[2]*sy.diff(v, y)
L1 = sy.diff(u, x, x)+sy.diff(u, y, y)
L2 = sy.diff(v, x, x)+sy.diff(v, y, y)
A1 = u*sy.diff(u, x)+v*sy.diff(u, y)
A2 = u*sy.diff(v, x)+v*sy.diff(v, y)
P1 = sy.diff(p, x)
P2 = sy.diff(p, y)
C1 = sy.diff(d, x, y) - sy.diff(b, y, y)
C2 = sy.diff(b, x, y) - sy.diff(d, x, x)
NS1 = -d*(sy.diff(d, x) - sy.diff(b, y))
NS2 = b*(sy.diff(d, x) - sy.diff(b, y))
R1 = sy.diff(r, x)
R2 = sy.diff(r, y)
M1 = sy.diff(u*d-v*b, y)
M2 = -sy.diff(u*d-v*b, x)
u0 = Expression((myCCode(u), myCCode(v)), degree=4)
p0 = Expression(myCCode(p), degree=4)
b0 = Expression((myCCode(b), myCCode(d)), degree=4)
r0 = Expression(myCCode(r), degree=4)
print " u = (", str(u).replace('x[0]', 'x').replace('x[1]', 'y'), ", ", str(v).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
print " p = (", str(p).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
print " b = (", str(b).replace('x[0]', 'x').replace('x[1]', 'y'), ", ", str(d).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
print " r = (", str(r).replace('x[0]', 'x').replace('x[1]', 'y'), ")\n"
Laplacian = Expression((myCCode(L1), myCCode(L2)), degree=4)
Advection = Expression((myCCode(A1), myCCode(A2)), degree=4)
gradPres = Expression((myCCode(P1), myCCode(P2)), degree=4)
NScouple = Expression((myCCode(NS1), myCCode(NS2)), degree=4)
CurlCurl = Expression((myCCode(C1), myCCode(C2)), degree=4)
gradLagr = Expression((myCCode(R1), myCCode(R2)), degree=4)
Mcouple = Expression((myCCode(M1), myCCode(M2)), degree=4)
# pN = as_matrix(((Expression(myCCode(J11)), Expression(myCCode(J12))), (Expression(myCCode(J21)), Expression(myCCode(J22)))))
return u0, p0, b0, r0, 1, Laplacian, Advection, gradPres, NScouple, CurlCurl, gradLagr, Mcouple
# Sets up the initial guess for the MHD problem
def Stokes(V, Q, F, u0, pN, params, mesh):
parameters['reorder_dofs_serial'] = False
W = FunctionSpace(mesh, MixedElement([V, Q]))
IS = MO.IndexSet(W)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
n = FacetNormal(mesh)
# dx = Measure('dx', domain=mesh, subdomain_data=domains)
# ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
a11 = params[2]*inner(grad(v), grad(u))*dx
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
a = a11+a12+a21
L = inner(v, F)*dx #- inner(pN*n,v)*ds(2)
pp = params[2]*inner(grad(v), grad(u))*dx + (1./params[2])*p*q*dx
def boundary(x, on_boundary):
return on_boundary
# bcu = DirichletBC(W.sub(0), u0, boundaries, 1)
bcu = DirichletBC(W.sub(0), u0, boundary)
# bcu = [bcu1, bcu2]
A, b = assemble_system(a, L, bcu)
A, b = CP.Assemble(A, b)
C = A.getSubMatrix(IS[1],IS[1])
u = b.duplicate()
P, Pb = assemble_system(pp, L, bcu)
P, Pb = CP.Assemble(P, Pb)
# ksp = PETSc.KSP()
# ksp.create(comm=PETSc.COMM_WORLD)
# pc = ksp.getPC()
# ksp.setType('preonly')
# pc.setType('lu')
# OptDB = PETSc.Options()
# # if __version__ != '1.6.0':
# OptDB['pc_factor_mat_solver_package'] = "pastix"
# OptDB['pc_factor_mat_ordering_type'] = "rcm"
# ksp.setFromOptions()
# ksp.setOperators(A,A)
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-8)
ksp.max_it = 200
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
ksp.setType('minres')
pc.setPythonContext(StokesPrecond.Approx(W, 1))
ksp.setOperators(A,P)
scale = b.norm()
b = b/scale
del A
start_time = time.time()
ksp.solve(b,u)
# Mits +=dodim
u = u*scale
print ("{:40}").format("Stokes solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format(" Its: "), ("{:4}").format(ksp.its), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
u_k = Function(FunctionSpace(mesh, V))
p_k = Function(FunctionSpace(mesh, Q))
u_k.vector()[:] = u.getSubVector(IS[0]).array
p_k.vector()[:] = u.getSubVector(IS[1]).array
ones = Function(FunctionSpace(mesh, Q))
ones.vector()[:]=(0*ones.vector().array()+1)
p_k.vector()[:] += -assemble(p_k*dx)/assemble(ones*dx)
return u_k, p_k
def Maxwell(V, Q, F, b0, r0, params, mesh,HiptmairMatrices, Hiptmairtol):
parameters['reorder_dofs_serial'] = False
W = V*Q
W = FunctionSpace(mesh, MixedElement([V, Q]))
IS = MO.IndexSet(W)
(b, r) = TrialFunctions(W)
(c, s) = TestFunctions(W)
if params[0] == 0.0:
a11 = params[1]*inner(curl(b), curl(c))*dx
else:
a11 = params[1]*params[0]*inner(curl(b), curl(c))*dx
a21 = inner(b,grad(s))*dx
a12 = inner(c,grad(r))*dx
# print F
L = inner(c, F)*dx
a = a11+a12+a21
def boundary(x, on_boundary):
return on_boundary
# class b0(Expression):
# def __init__(self):
# self.p = 1
# def eval_cell(self, values, x, ufc_cell):
# values[0] = 0.0
# values[1] = 1.0
# def value_shape(self):
# return (2,)
bcb = DirichletBC(W.sub(0), b0, boundary)
bcr = DirichletBC(W.sub(1), r0, boundary)
bc = [bcb, bcr]
A, b = assemble_system(a, L, bc)
A, b = CP.Assemble(A, b)
u = b.duplicate()
# ksp = PETSc.KSP()
# ksp.create(comm=PETSc.COMM_WORLD)
# pc = ksp.getPC()
# ksp.setType('preonly')
# pc.setType('lu')
# OptDB = PETSc.Options()
# OptDB['pc_factor_mat_solver_package'] = "pastix"
# OptDB['pc_factor_mat_ordering_type'] = "rcm"
# ksp.setFromOptions()
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-8)
ksp.max_it = 200
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
ksp.setType('minres')
pc.setPythonContext(MP.Hiptmair(W, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],Hiptmairtol))
scale = b.norm()
b = b/scale
ksp.setOperators(A,A)
del A
start_time = time.time()
ksp.solve(b,u)
print ("{:40}").format("Maxwell solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format(" Its: "), ("{:4}").format(ksp.its), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
u = u*scale
b_k = Function(FunctionSpace(mesh, V))
r_k = Function(FunctionSpace(mesh, Q))
b_k.vector()[:] = u.getSubVector(IS[0]).array
r_k.vector()[:] = u.getSubVector(IS[1]).array
return b_k, r_k
| wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/HartmanChannel.py | Python | mit | 9,266 |
from flask import render_template, request, Response
from SharedPreferences import AnglesStorage
from app import app
import requests
import urllib
import math
ipcamProtocol = 'http://'
ipcamAddress = '143.107.235.49:8085'
ipcamWebctl = '/decoder_control.cgi'
ipcamVideostream = '/videostream.cgi'
ipcamUser = 'sel630'
ipcamPasswd = 'sel630'
ipcamResolution = '32'
ipcamRate = '0'
ipcamCommands = {'up': 2, 'down': 0, 'left': 6, 'right': 4}
diagonal_correction = 0.707
@app.route('/')
@app.route('/index/')
def index():
return render_template("index.html")
@app.route('/camposition/', methods=['GET', 'POST'])
def camposition():
angles = AnglesStorage()
pospitch = angles.pitch
posyaw = angles.yaw
pitch = float(request.args['pitch'])
yaw = float(request.args['yaw'])
if pitch >= 80 or pitch <= -30:
return 'Invalid pitch'
if yaw >= 100 or yaw <= -100:
return 'Invalid yaw'
print 'pitch: ', pospitch, ' yaw: ', posyaw
movementy = int(pitch - pospitch)
movementx = int(yaw - posyaw)
# -22.5, 22.5, 67.5, 112.5, 157.2
# -0.392, 0.392, 1.178, 1.963, 2.743
movement = math.sqrt(movementx ** 2 + movementy ** 2)
movedir = math.atan2(movementy, movementx)
if movement > 3:
if movedir > -0.392 and movedir <= 0.392:
movecamera(6, movement) # move right
elif movedir > 0.392 and movedir <= 1.178:
movecamera(93, diagonal_correction * movement) # move right up
elif movedir > 1.178 and movedir <= 1.963:
movecamera(2, movement) # move up
elif movedir > 1.963 and movedir <= 2.743:
movecamera(92, diagonal_correction * movement) # move left up
elif movedir < -0.392 and movedir >= -1.178:
movecamera(91, diagonal_correction * movement) # move right down
elif movedir < -1.178 and movedir >= -1.963:
movecamera(0, movement) # move down
elif movedir < -1.963 and movedir >= -2.743:
movecamera(90, diagonal_correction * movement) # move left down
elif movedir > 2.743 or movedir < -2.743:
movecamera(4, movement) # move left
else:
return 'No movement'
pitch = movement * math.sin(movedir)
yaw = movement * math.cos(movedir)
angles.pitch = int(pospitch + pitch)
angles.yaw = int(posyaw + yaw)
angles.done()
pospitch = angles.pitch
posyaw = angles.yaw
print 'pitch: ', pospitch, ' yaw: ', posyaw
return 'ACK'
@app.route('/camposition/set_zero/')
def campositionSetzero():
angles = AnglesStorage()
angles.pitch = 0
angles.yaw = 0
angles.done()
# requests.get(ipcamProtocol + ipcamAddress + ipcamWebctl,
# {'user': ipcamUser,
# 'pwd': ipcamPasswd,
# 'command': 25})
return 'ACK'
@app.route('/camposition/cam_step/', methods=['GET', 'POST'])
def camstep():
cmd = int(request.args['move'])
movecamera(cmd, 5)
return 'ACK'
def movecamera(cmd, degree):
requests.get(ipcamProtocol + ipcamAddress + ipcamWebctl,
{'user': ipcamUser,
'pwd': ipcamPasswd,
'command': cmd,
'onestep': 1,
'degree': degree})
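# Example usage (hypothetical, not part of the original source): send an 'up'
# command with a degree value of 5, reusing the codes from ipcamCommands:
#   movecamera(ipcamCommands['up'], 5)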
@app.route('/camerastream/')
def camerastream():
return render_template("camerastream.html")
@app.route('/camstream/')
def camstream():
url = "%(protocol)s%(address)s%(cgi)s?user=%(user)s&pwd=%(pwd)s&resolution\
=%(resolution)s&rate=%(rate)s" % {'protocol': ipcamProtocol,
'address': ipcamAddress,
'cgi': ipcamVideostream,
'user': ipcamUser,
'pwd': ipcamPasswd,
'resolution': ipcamResolution,
'rate': ipcamRate}
print url
ws = urllib.urlopen(url)
    def stream():
        # Relay the camera's multipart MJPEG stream part by part: copy the
        # boundary and header lines through unchanged, read the frame body
        # using the Content-Length value, then yield the whole part.
        while(True):
            res = ""
            s = ws.readline()    # part boundary line
            res = res + s
            s = ws.readline()    # first part header
            res = res + s
            s = ws.readline()    # Content-Length header, parsed below
            res = res + s
            length = int(s.split(':')[1].strip())
            s = ws.readline()    # blank line closing the part headers
            res = res + s
            s = ws.read(length)  # JPEG frame body
            res = res + s
            s = ws.readline()    # trailing newline after the body
            res = res + s
            yield res
return Response(
stream(),
mimetype="multipart/x-mixed-replace; boundary=ipcamera")
@app.route('/about/')
def about():
return render_template("about.html")
| ddmendes/R3V-Remote3DViewer | site/app/views.py | Python | mit | 4,879 |
from _ctypes.basics import _CData, _CDataMeta, cdata_from_address
from _ctypes.primitive import SimpleType, _SimpleCData
from _ctypes.basics import ArgumentError, keepalive_key
from _ctypes.basics import is_struct_shape
from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error
import _rawffi
import _ffi
import sys
import traceback
try: from __pypy__ import builtinify
except ImportError: builtinify = lambda f: f
# XXX this file needs huge refactoring I fear
PARAMFLAG_FIN = 0x1
PARAMFLAG_FOUT = 0x2
PARAMFLAG_FLCID = 0x4
PARAMFLAG_COMBINED = PARAMFLAG_FIN | PARAMFLAG_FOUT | PARAMFLAG_FLCID
VALID_PARAMFLAGS = (
0,
PARAMFLAG_FIN,
PARAMFLAG_FIN | PARAMFLAG_FOUT,
PARAMFLAG_FIN | PARAMFLAG_FLCID
)
WIN64 = sys.platform == 'win32' and sys.maxint == 2**63 - 1
def get_com_error(errcode, riid, pIunk):
"Win32 specific: build a COM Error exception"
# XXX need C support code
from _ctypes import COMError
return COMError(errcode, None, None)
@builtinify
def call_function(func, args):
"Only for debugging so far: So that we can call CFunction instances"
funcptr = CFuncPtr(func)
funcptr.restype = int
return funcptr(*args)
class CFuncPtrType(_CDataMeta):
# XXX write down here defaults and such things
def _sizeofinstances(self):
return _rawffi.sizeof('P')
def _alignmentofinstances(self):
return _rawffi.alignment('P')
def _is_pointer_like(self):
return True
from_address = cdata_from_address
class CFuncPtr(_CData):
__metaclass__ = CFuncPtrType
_argtypes_ = None
_restype_ = None
_errcheck_ = None
_flags_ = 0
_ffiargshape = 'P'
_ffishape = 'P'
_fficompositesize = None
_ffiarray = _rawffi.Array('P')
_needs_free = False
callable = None
_ptr = None
_buffer = None
_address = None
# win32 COM properties
_paramflags = None
_com_index = None
_com_iid = None
_is_fastpath = False
def _getargtypes(self):
return self._argtypes_
def _setargtypes(self, argtypes):
self._ptr = None
if argtypes is None:
self._argtypes_ = ()
else:
for i, argtype in enumerate(argtypes):
if not hasattr(argtype, 'from_param'):
raise TypeError(
"item %d in _argtypes_ has no from_param method" % (
i + 1,))
self._argtypes_ = list(argtypes)
self._check_argtypes_for_fastpath()
argtypes = property(_getargtypes, _setargtypes)
def _check_argtypes_for_fastpath(self):
if all([hasattr(argtype, '_ffiargshape') for argtype in self._argtypes_]):
fastpath_cls = make_fastpath_subclass(self.__class__)
fastpath_cls.enable_fastpath_maybe(self)
def _getparamflags(self):
return self._paramflags
def _setparamflags(self, paramflags):
if paramflags is None or not self._argtypes_:
self._paramflags = None
return
if not isinstance(paramflags, tuple):
raise TypeError("paramflags must be a tuple or None")
if len(paramflags) != len(self._argtypes_):
raise ValueError("paramflags must have the same length as argtypes")
for idx, paramflag in enumerate(paramflags):
paramlen = len(paramflag)
name = default = None
if paramlen == 1:
flag = paramflag[0]
elif paramlen == 2:
flag, name = paramflag
elif paramlen == 3:
flag, name, default = paramflag
else:
raise TypeError(
"paramflags must be a sequence of (int [,string [,value]]) "
"tuples"
)
if not isinstance(flag, int):
raise TypeError(
"paramflags must be a sequence of (int [,string [,value]]) "
"tuples"
)
_flag = flag & PARAMFLAG_COMBINED
if _flag == PARAMFLAG_FOUT:
typ = self._argtypes_[idx]
if getattr(typ, '_ffiargshape', None) not in ('P', 'z', 'Z'):
raise TypeError(
"'out' parameter %d must be a pointer type, not %s"
% (idx+1, type(typ).__name__)
)
elif _flag not in VALID_PARAMFLAGS:
raise TypeError("paramflag value %d not supported" % flag)
self._paramflags = paramflags
paramflags = property(_getparamflags, _setparamflags)
def _getrestype(self):
return self._restype_
def _setrestype(self, restype):
self._ptr = None
if restype is int:
from ctypes import c_int
restype = c_int
if not (isinstance(restype, _CDataMeta) or restype is None or
callable(restype)):
raise TypeError("restype must be a type, a callable, or None")
self._restype_ = restype
def _delrestype(self):
self._ptr = None
del self._restype_
restype = property(_getrestype, _setrestype, _delrestype)
def _geterrcheck(self):
return getattr(self, '_errcheck_', None)
def _seterrcheck(self, errcheck):
if not callable(errcheck):
raise TypeError("The errcheck attribute must be callable")
self._errcheck_ = errcheck
def _delerrcheck(self):
try:
del self._errcheck_
except AttributeError:
pass
errcheck = property(_geterrcheck, _seterrcheck, _delerrcheck)
def _ffishapes(self, args, restype):
if args is None:
args = []
argtypes = [arg._ffiargshape for arg in args]
if restype is not None:
if not isinstance(restype, SimpleType):
raise TypeError("invalid result type for callback function")
restype = restype._ffiargshape
else:
restype = 'O' # void
return argtypes, restype
def _set_address(self, address):
if not self._buffer:
self._buffer = _rawffi.Array('P')(1)
self._buffer[0] = address
def _get_address(self):
return self._buffer[0]
def __init__(self, *args):
self.name = None
self._objects = {keepalive_key(0):self}
self._needs_free = True
# Empty function object -- this is needed for casts
if not args:
self._set_address(0)
return
argsl = list(args)
argument = argsl.pop(0)
# Direct construction from raw address
if isinstance(argument, (int, long)) and not argsl:
self._set_address(argument)
restype = self._restype_
if restype is None:
import ctypes
restype = ctypes.c_int
self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype)
self._check_argtypes_for_fastpath()
return
# A callback into python
if callable(argument) and not argsl:
self.callable = argument
ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_)
if self._restype_ is None:
ffires = None
self._ptr = _rawffi.CallbackPtr(self._wrap_callable(argument,
self.argtypes),
ffiargs, ffires, self._flags_)
self._buffer = self._ptr.byptr()
return
# Function exported from a shared library
if isinstance(argument, tuple) and len(argument) == 2:
import ctypes
self.name, dll = argument
if isinstance(dll, str):
                self.dll = ctypes.CDLL(dll)
else:
self.dll = dll
if argsl:
self.paramflags = argsl.pop(0)
if argsl:
raise TypeError("Unknown constructor %s" % (args,))
# We need to check dll anyway
ptr = self._getfuncptr([], ctypes.c_int)
self._set_address(ptr.getaddr())
return
# A COM function call, by index
if (sys.platform == 'win32' and isinstance(argument, (int, long))
and argsl):
ffiargs, ffires = self._ffishapes(self._argtypes_, self._restype_)
self._com_index = argument + 0x1000
self.name = argsl.pop(0)
if argsl:
self.paramflags = argsl.pop(0)
if argsl:
self._com_iid = argsl.pop(0)
if argsl:
raise TypeError("Unknown constructor %s" % (args,))
return
raise TypeError("Unknown constructor %s" % (args,))
def _wrap_callable(self, to_call, argtypes):
def f(*args):
if argtypes:
args = [argtype._CData_retval(argtype.from_address(arg)._buffer)
for argtype, arg in zip(argtypes, args)]
return to_call(*args)
return f
def __call__(self, *args, **kwargs):
argtypes = self._argtypes_
if self.callable is not None:
if len(args) == len(argtypes):
pass
elif self._flags_ & _rawffi.FUNCFLAG_CDECL:
if len(args) < len(argtypes):
plural = len(argtypes) > 1 and "s" or ""
raise TypeError(
"This function takes at least %d argument%s (%s given)"
% (len(argtypes), plural, len(args)))
else:
# For cdecl functions, we allow more actual arguments
# than the length of the argtypes tuple.
args = args[:len(self._argtypes_)]
else:
plural = len(self._argtypes_) > 1 and "s" or ""
raise TypeError(
"This function takes %d argument%s (%s given)"
% (len(self._argtypes_), plural, len(args)))
try:
newargs = self._convert_args_for_callback(argtypes, args)
except (UnicodeError, TypeError, ValueError), e:
raise ArgumentError(str(e))
try:
res = self.callable(*newargs)
except:
exc_info = sys.exc_info()
traceback.print_tb(exc_info[2], file=sys.stderr)
print >>sys.stderr, "%s: %s" % (exc_info[0].__name__, exc_info[1])
return 0
if self._restype_ is not None:
return res
return
if argtypes is None:
# XXX this warning was originally meaning "it's going to be
# really slow". Now we don't worry that much about slowness
# of ctypes, and it's strange to get warnings for perfectly-
# legal code.
#warnings.warn('C function without declared arguments called',
# RuntimeWarning, stacklevel=2)
argtypes = []
if self._com_index:
from ctypes import cast, c_void_p, POINTER
if not args:
raise ValueError(
"native COM method call without 'this' parameter"
)
thisarg = cast(args[0], POINTER(POINTER(c_void_p)))
keepalives, newargs, argtypes, outargs = self._convert_args(argtypes,
args[1:], kwargs)
newargs.insert(0, args[0].value)
argtypes.insert(0, c_void_p)
else:
thisarg = None
keepalives, newargs, argtypes, outargs = self._convert_args(argtypes,
args, kwargs)
funcptr = self._getfuncptr(argtypes, self._restype_, thisarg)
result = self._call_funcptr(funcptr, *newargs)
result = self._do_errcheck(result, args)
if not outargs:
return result
from ctypes import c_void_p
simple_cdata = type(c_void_p()).__bases__[0]
outargs = [x.value if type(x).__bases__[0] is simple_cdata else x
for x in outargs]
if len(outargs) == 1:
return outargs[0]
return tuple(outargs)
def _call_funcptr(self, funcptr, *newargs):
if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO:
tmp = _rawffi.get_errno()
_rawffi.set_errno(get_errno())
set_errno(tmp)
if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR:
tmp = _rawffi.get_last_error()
_rawffi.set_last_error(get_last_error())
set_last_error(tmp)
try:
result = funcptr(*newargs)
finally:
if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO:
tmp = _rawffi.get_errno()
_rawffi.set_errno(get_errno())
set_errno(tmp)
if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR:
tmp = _rawffi.get_last_error()
_rawffi.set_last_error(get_last_error())
set_last_error(tmp)
#
try:
return self._build_result(self._restype_, result, newargs)
finally:
funcptr.free_temp_buffers()
def _do_errcheck(self, result, args):
# The 'errcheck' protocol
if self._errcheck_:
v = self._errcheck_(result, self, args)
# If the errcheck funtion failed, let it throw
# If the errcheck function returned newargs unchanged,
# continue normal processing.
# If the errcheck function returned something else,
# use that as result.
if v is not args:
return v
return result
def _getfuncptr_fromaddress(self, argtypes, restype):
address = self._get_address()
ffiargs = [argtype.get_ffi_argtype() for argtype in argtypes]
ffires = restype.get_ffi_argtype()
return _ffi.FuncPtr.fromaddr(address, '', ffiargs, ffires, self._flags_)
def _getfuncptr(self, argtypes, restype, thisarg=None):
if self._ptr is not None and (argtypes is self._argtypes_ or argtypes == self._argtypes_):
return self._ptr
if restype is None or not isinstance(restype, _CDataMeta):
import ctypes
restype = ctypes.c_int
if self._buffer is not None:
ptr = self._getfuncptr_fromaddress(argtypes, restype)
if argtypes == self._argtypes_:
self._ptr = ptr
return ptr
if self._com_index:
# extract the address from the object's virtual table
if not thisarg:
raise ValueError("COM method call without VTable")
ptr = thisarg[0][self._com_index - 0x1000]
ffiargs = [argtype.get_ffi_argtype() for argtype in argtypes]
ffires = restype.get_ffi_argtype()
return _ffi.FuncPtr.fromaddr(ptr, '', ffiargs, ffires, self._flags_)
cdll = self.dll._handle
try:
ffi_argtypes = [argtype.get_ffi_argtype() for argtype in argtypes]
ffi_restype = restype.get_ffi_argtype()
self._ptr = cdll.getfunc(self.name, ffi_argtypes, ffi_restype)
return self._ptr
except AttributeError:
if self._flags_ & _rawffi.FUNCFLAG_CDECL:
raise
# Win64 has no stdcall calling conv, so it should also not have the
# name mangling of it.
if WIN64:
raise
# For stdcall, try mangled names:
# funcname -> _funcname@<n>
# where n is 0, 4, 8, 12, ..., 128
for i in range(33):
mangled_name = "_%s@%d" % (self.name, i*4)
try:
return cdll.getfunc(mangled_name,
ffi_argtypes, ffi_restype,
# XXX self._flags_
)
except AttributeError:
pass
raise
@classmethod
def _conv_param(cls, argtype, arg):
if argtype is not None:
arg = argtype.from_param(arg)
if hasattr(arg, '_as_parameter_'):
arg = arg._as_parameter_
if isinstance(arg, _CData):
return arg, arg._to_ffi_param(), type(arg)
#
# non-usual case: we do the import here to save a lot of code in the
# jit trace of the normal case
from ctypes import c_char_p, c_wchar_p, c_void_p, c_int
#
if isinstance(arg, str):
cobj = c_char_p(arg)
elif isinstance(arg, unicode):
cobj = c_wchar_p(arg)
elif arg is None:
cobj = c_void_p()
elif isinstance(arg, (int, long)):
cobj = c_int(arg)
else:
raise TypeError("Don't know how to handle %s" % (arg,))
return cobj, cobj._to_ffi_param(), type(cobj)
def _convert_args_for_callback(self, argtypes, args):
assert len(argtypes) == len(args)
newargs = []
for argtype, arg in zip(argtypes, args):
param = argtype.from_param(arg)
_type_ = getattr(argtype, '_type_', None)
if _type_ == 'P': # special-case for c_void_p
param = param._get_buffer_value()
elif self._is_primitive(argtype):
param = param.value
newargs.append(param)
return newargs
def _convert_args(self, argtypes, args, kwargs, marker=object()):
newargs = []
outargs = []
keepalives = []
newargtypes = []
total = len(args)
paramflags = self._paramflags
inargs_idx = 0
if not paramflags and total < len(argtypes):
raise TypeError("not enough arguments")
for i, argtype in enumerate(argtypes):
flag = 0
name = None
defval = marker
if paramflags:
paramflag = paramflags[i]
paramlen = len(paramflag)
name = None
if paramlen == 1:
flag = paramflag[0]
elif paramlen == 2:
flag, name = paramflag
elif paramlen == 3:
flag, name, defval = paramflag
flag = flag & PARAMFLAG_COMBINED
if flag == PARAMFLAG_FIN | PARAMFLAG_FLCID:
val = defval
if val is marker:
val = 0
keepalive, newarg, newargtype = self._conv_param(argtype, val)
keepalives.append(keepalive)
newargs.append(newarg)
newargtypes.append(newargtype)
elif flag in (0, PARAMFLAG_FIN):
if inargs_idx < total:
val = args[inargs_idx]
inargs_idx += 1
elif kwargs and name in kwargs:
val = kwargs[name]
inargs_idx += 1
elif defval is not marker:
val = defval
elif name:
raise TypeError("required argument '%s' missing" % name)
else:
raise TypeError("not enough arguments")
keepalive, newarg, newargtype = self._conv_param(argtype, val)
keepalives.append(keepalive)
newargs.append(newarg)
newargtypes.append(newargtype)
elif flag == PARAMFLAG_FOUT:
if defval is not marker:
outargs.append(defval)
keepalive, newarg, newargtype = self._conv_param(argtype, defval)
else:
import ctypes
val = argtype._type_()
outargs.append(val)
keepalive = None
newarg = ctypes.byref(val)
newargtype = type(newarg)
keepalives.append(keepalive)
newargs.append(newarg)
newargtypes.append(newargtype)
else:
raise ValueError("paramflag %d not yet implemented" % flag)
else:
try:
keepalive, newarg, newargtype = self._conv_param(argtype, args[i])
except (UnicodeError, TypeError, ValueError), e:
raise ArgumentError(str(e))
keepalives.append(keepalive)
newargs.append(newarg)
newargtypes.append(newargtype)
inargs_idx += 1
if len(newargs) < len(args):
extra = args[len(newargs):]
for i, arg in enumerate(extra):
try:
keepalive, newarg, newargtype = self._conv_param(None, arg)
except (UnicodeError, TypeError, ValueError), e:
raise ArgumentError(str(e))
keepalives.append(keepalive)
newargs.append(newarg)
newargtypes.append(newargtype)
return keepalives, newargs, newargtypes, outargs
@staticmethod
def _is_primitive(argtype):
return argtype.__bases__[0] is _SimpleCData
def _wrap_result(self, restype, result):
"""
Convert from low-level repr of the result to the high-level python
one.
"""
# hack for performance: if restype is a "simple" primitive type, don't
# allocate the buffer because it's going to be thrown away immediately
if self._is_primitive(restype) and not restype._is_pointer_like():
return result
#
shape = restype._ffishape
if is_struct_shape(shape):
buf = result
else:
buf = _rawffi.Array(shape)(1, autofree=True)
buf[0] = result
retval = restype._CData_retval(buf)
return retval
def _build_result(self, restype, result, argsandobjs):
"""Build the function result:
If there is no OUT parameter, return the actual function result
If there is one OUT parameter, return it
If there are many OUT parameters, return a tuple"""
# XXX: note for the future: the function used to take a "resbuffer",
# i.e. an array of ints. Now it takes a result, which is already a
# python object. All places that do "resbuffer[0]" should check that
# result is actually an int and just use it.
#
# Also, argsandobjs used to be "args" in __call__, now it's "newargs"
# (i.e., the already unwrapped objects). It's used only when we have a
# PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a
# failing test
retval = None
if restype is not None:
checker = getattr(self.restype, '_check_retval_', None)
if checker:
val = restype(result)
# the original ctypes seems to make the distinction between
# classes defining a new type, and their subclasses
if '_type_' in restype.__dict__:
val = val.value
# XXX Raise a COMError when restype is HRESULT and
# checker(val) fails. How to check for restype == HRESULT?
if self._com_index:
if result & 0x80000000:
raise get_com_error(result, None, None)
else:
retval = checker(val)
elif not isinstance(restype, _CDataMeta):
retval = restype(result)
else:
retval = self._wrap_result(restype, result)
return retval
def __nonzero__(self):
return self._com_index is not None or bool(self._buffer[0])
def __del__(self):
if self._needs_free:
# XXX we need to find a bad guy here
if self._buffer is None:
return
self._buffer.free()
self._buffer = None
if isinstance(self._ptr, _rawffi.CallbackPtr):
self._ptr.free()
self._ptr = None
self._needs_free = False
def make_fastpath_subclass(CFuncPtr):
if CFuncPtr._is_fastpath:
return CFuncPtr
#
try:
return make_fastpath_subclass.memo[CFuncPtr]
except KeyError:
pass
class CFuncPtrFast(CFuncPtr):
_is_fastpath = True
_slowpath_allowed = True # set to False by tests
@classmethod
def enable_fastpath_maybe(cls, obj):
if (obj.callable is None and
obj._com_index is None):
obj.__class__ = cls
def __rollback(self):
assert self._slowpath_allowed
self.__class__ = CFuncPtr
# disable the fast path if we reset argtypes
def _setargtypes(self, argtypes):
self.__rollback()
self._setargtypes(argtypes)
argtypes = property(CFuncPtr._getargtypes, _setargtypes)
def _setcallable(self, func):
self.__rollback()
self.callable = func
callable = property(lambda x: None, _setcallable)
def _setcom_index(self, idx):
self.__rollback()
self._com_index = idx
_com_index = property(lambda x: None, _setcom_index)
def __call__(self, *args):
thisarg = None
argtypes = self._argtypes_
restype = self._restype_
funcptr = self._getfuncptr(argtypes, restype, thisarg)
try:
result = self._call_funcptr(funcptr, *args)
result = self._do_errcheck(result, args)
except (TypeError, ArgumentError, UnicodeDecodeError):
assert self._slowpath_allowed
return CFuncPtr.__call__(self, *args)
return result
make_fastpath_subclass.memo[CFuncPtr] = CFuncPtrFast
return CFuncPtrFast
make_fastpath_subclass.memo = {}
| bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib_pypy/_ctypes/function.py | Python | mit | 26,457 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import six
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import password_change
from django.http import HttpResponseNotFound
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView, TemplateView
from registration.signals import user_registered
from shuup import configuration
from shuup.core.models import (
get_company_contact, get_person_contact, MutableAddress, SavedAddress
)
from shuup.front.views.dashboard import DashboardViewMixin
from shuup.utils.form_group import FormGroup
from shuup.utils.importing import cached_load
from .forms import CompanyContactForm, PersonContactForm, SavedAddressForm
from .notify_events import CompanyAccountCreated
class PasswordChangeView(DashboardViewMixin, TemplateView):
template_name = "shuup/customer_information/change_password.jinja"
def post(self, *args, **kwargs):
response = password_change(
self.request,
post_change_redirect="shuup:customer_edit",
template_name=self.template_name
)
if response.status_code == 302:
messages.success(self.request, _("Password successfully changed."))
return response
def get_context_data(self, **kwargs):
context = super(PasswordChangeView, self).get_context_data(**kwargs)
context["form"] = PasswordChangeForm(user=self.request.user)
return context
class CustomerEditView(DashboardViewMixin, FormView):
template_name = "shuup/customer_information/edit_customer.jinja"
def get_form(self, form_class):
contact = get_person_contact(self.request.user)
form_group = FormGroup(**self.get_form_kwargs())
address_form_class = cached_load("SHUUP_ADDRESS_MODEL_FORM")
form_group.add_form_def("billing", address_form_class, kwargs={"instance": contact.default_billing_address})
form_group.add_form_def("shipping", address_form_class, kwargs={"instance": contact.default_shipping_address})
form_group.add_form_def("contact", PersonContactForm, kwargs={"instance": contact})
return form_group
def form_valid(self, form):
contact = form["contact"].save()
user = contact.user
billing_address = form["billing"].save()
shipping_address = form["shipping"].save()
if billing_address.pk != contact.default_billing_address_id: # Identity changed due to immutability
contact.default_billing_address = billing_address
if shipping_address.pk != contact.default_shipping_address_id: # Identity changed due to immutability
contact.default_shipping_address = shipping_address
if not bool(get_company_contact(self.request.user)): # Only update user details for non-company members
user.email = contact.email
user.first_name = contact.first_name
user.last_name = contact.last_name
user.save()
contact.save()
messages.success(self.request, _("Account information saved successfully."))
return redirect("shuup:customer_edit")
class CompanyEditView(DashboardViewMixin, FormView):
template_name = "shuup/customer_information/edit_company.jinja"
def dispatch(self, request, *args, **kwargs):
if not configuration.get(None, "allow_company_registration"):
return HttpResponseNotFound()
return super(CompanyEditView, self).dispatch(request, *args, **kwargs)
def get_form(self, form_class):
user = self.request.user
company = get_company_contact(user)
person = get_person_contact(user)
form_group = FormGroup(**self.get_form_kwargs())
address_form_class = cached_load("SHUUP_ADDRESS_MODEL_FORM")
form_group.add_form_def(
"billing",
address_form_class,
kwargs={
"instance": _get_default_address_for_contact(company, "default_billing_address", person)
}
)
form_group.add_form_def(
"shipping",
address_form_class,
kwargs={
"instance": _get_default_address_for_contact(company, "default_shipping_address", person)
}
)
form_group.add_form_def("contact", CompanyContactForm, kwargs={"instance": company})
return form_group
def form_valid(self, form):
company = form["contact"].save(commit=False)
is_new = not bool(company.pk)
company.save()
user = self.request.user
person = get_person_contact(user)
company.members.add(person)
billing_address = form["billing"].save()
shipping_address = form["shipping"].save()
if billing_address.pk != company.default_billing_address_id: # Identity changed due to immutability
company.default_billing_address = billing_address
if shipping_address.pk != company.default_shipping_address_id: # Identity changed due to immutability
company.default_shipping_address = shipping_address
user.email = company.email
user.first_name = company.name
user.last_name = ""
user.save()
message = _("Company information saved successfully.")
# If company registration requires activation,
# company will be created as inactive.
if is_new and configuration.get(None, "company_registration_requires_approval"):
company.is_active = False
message = _("Company information saved successfully. "
"Please follow the instructions sent to your email address.")
company.save()
if is_new:
user_registered.send(sender=self.__class__,
user=self.request.user,
request=self.request)
CompanyAccountCreated(contact=company, customer_email=company.email).run()
messages.success(self.request, message)
return redirect("shuup:company_edit")
class AddressBookView(DashboardViewMixin, TemplateView):
template_name = "shuup/customer_information/addressbook/index.jinja"
def get_context_data(self, **kwargs):
context = super(AddressBookView, self).get_context_data(**kwargs)
context["addresses"] = SavedAddress.objects.filter(owner=self.request.customer)
context["customer"] = self.request.customer
return context
class AddressBookEditView(DashboardViewMixin, FormView):
template_name = "shuup/customer_information/addressbook/edit.jinja"
form_class = SavedAddressForm
instance = None
def dispatch(self, request, *args, **kwargs):
try:
self.instance = SavedAddress.objects.get(pk=kwargs.get("pk", 0), owner=self.request.customer)
except:
self.instance = None
return super(AddressBookEditView, self).dispatch(request, *args, **kwargs)
def get_form(self, form_class):
form_group = FormGroup(**self.get_form_kwargs())
address_kwargs = {}
saved_address_kwargs = {}
if self.instance:
address_kwargs["instance"] = self.instance.address
saved_address_kwargs["initial"] = {
"role": self.instance.role,
"status": self.instance.status,
"title": self.instance.title,
}
form_group.add_form_def("address", cached_load("SHUUP_ADDRESS_MODEL_FORM"), kwargs=address_kwargs)
form_group.add_form_def(
"saved_address",
SavedAddressForm,
kwargs=saved_address_kwargs
)
return form_group
def form_valid(self, form):
address_form = form["address"]
if self.instance:
            # update the existing address in place rather than creating a new one
address = MutableAddress.objects.get(pk=self.instance.address.pk)
for k, v in six.iteritems(address_form.cleaned_data):
setattr(address, k, v)
address.save()
else:
address = address_form.save()
owner = self.request.customer
saf = form["saved_address"]
saved_address, updated = SavedAddress.objects.update_or_create(
owner=owner,
address=address,
defaults={
"title": saf.cleaned_data.get("title"),
"role": saf.cleaned_data.get("role"),
"status": saf.cleaned_data.get("status")
}
)
messages.success(self.request, _("Address information saved successfully."))
return redirect("shuup:address_book_edit", pk=saved_address.pk)
def delete_address(request, pk):
try:
SavedAddress.objects.get(pk=pk, owner=request.customer).delete()
except SavedAddress.DoesNotExist:
messages.error(request, _("Cannot remove address"))
return redirect("shuup:address_book")
def _get_default_address_for_contact(contact, address_attr, fallback_contact):
if contact and getattr(contact, address_attr, None):
return getattr(contact, address_attr)
if fallback_contact and getattr(fallback_contact, address_attr, None):
return getattr(fallback_contact, address_attr)
return None
| suutari-ai/shoop | shuup/front/apps/customer_information/views.py | Python | agpl-3.0 | 9,524 |
"""
Tests for Term.
"""
from itertools import product
from unittest import TestCase
from numpy import (
float32,
uint32,
uint8,
)
from zipline.errors import (
InputTermNotAtomic,
TermInputsNotSpecified,
WindowLengthNotSpecified,
)
from zipline.pipeline import Factor, TermGraph
from zipline.pipeline.data import Column, DataSet
from zipline.pipeline.term import AssetExists, NotSpecified
from zipline.pipeline.expression import NUMEXPR_MATH_FUNCS
class SomeDataSet(DataSet):
foo = Column(float32)
bar = Column(uint32)
buzz = Column(uint8)
class SomeFactor(Factor):
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
class NoLookbackFactor(Factor):
window_length = 0
class SomeOtherFactor(Factor):
window_length = 5
inputs = [SomeDataSet.bar, SomeDataSet.buzz]
SomeFactorAlias = SomeFactor
def gen_equivalent_factors():
"""
Return an iterator of SomeFactor instances that should all be the same
object.
"""
yield SomeFactor()
yield SomeFactor(inputs=NotSpecified)
yield SomeFactor(SomeFactor.inputs)
yield SomeFactor(inputs=SomeFactor.inputs)
yield SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
yield SomeFactor(window_length=SomeFactor.window_length)
yield SomeFactor(window_length=NotSpecified)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=NotSpecified,
)
yield SomeFactor(
[SomeDataSet.foo, SomeDataSet.bar],
window_length=SomeFactor.window_length,
)
yield SomeFactorAlias()
def to_dict(l):
"""
Convert a list to a dict with keys drawn from '0', '1', '2', ...
Example
-------
>>> to_dict([2, 3, 4])
{'0': 2, '1': 3, '2': 4}
"""
return dict(zip(map(str, range(len(l))), l))
class DependencyResolutionTestCase(TestCase):
def check_dependency_order(self, ordered_terms):
seen = set()
for term in ordered_terms:
for dep in term.dependencies:
self.assertIn(dep, seen)
seen.add(term)
def test_single_factor(self):
"""
Test dependency resolution for a single factor.
"""
def check_output(graph):
resolution_order = list(graph.ordered())
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertIn(SomeDataSet.foo, resolution_order)
self.assertIn(SomeDataSet.bar, resolution_order)
self.assertIn(SomeFactor(), resolution_order)
self.assertEqual(graph.node[SomeDataSet.foo]['extra_rows'], 4)
self.assertEqual(graph.node[SomeDataSet.bar]['extra_rows'], 4)
for foobar in gen_equivalent_factors():
check_output(TermGraph(to_dict([foobar])))
def test_single_factor_instance_args(self):
"""
Test dependency resolution for a single factor with arguments passed to
the constructor.
"""
bar, buzz = SomeDataSet.bar, SomeDataSet.buzz
graph = TermGraph(to_dict([SomeFactor([bar, buzz], window_length=5)]))
resolution_order = list(graph.ordered())
# SomeFactor, its inputs, and AssetExists()
self.assertEqual(len(resolution_order), 4)
self.check_dependency_order(resolution_order)
self.assertIn(AssetExists(), resolution_order)
self.assertEqual(graph.extra_rows[AssetExists()], 4)
self.assertIn(bar, resolution_order)
self.assertIn(buzz, resolution_order)
self.assertIn(SomeFactor([bar, buzz], window_length=5),
resolution_order)
self.assertEqual(graph.extra_rows[bar], 4)
self.assertEqual(graph.extra_rows[buzz], 4)
def test_reuse_atomic_terms(self):
"""
Test that raw inputs only show up in the dependency graph once.
"""
f1 = SomeFactor([SomeDataSet.foo, SomeDataSet.bar])
f2 = SomeOtherFactor([SomeDataSet.bar, SomeDataSet.buzz])
graph = TermGraph(to_dict([f1, f2]))
resolution_order = list(graph.ordered())
# bar should only appear once.
self.assertEqual(len(resolution_order), 6)
self.assertEqual(len(set(resolution_order)), 6)
self.check_dependency_order(resolution_order)
def test_disallow_recursive_lookback(self):
with self.assertRaises(InputTermNotAtomic):
SomeFactor(inputs=[SomeFactor(), SomeDataSet.foo])
class ObjectIdentityTestCase(TestCase):
def assertSameObject(self, *objs):
first = objs[0]
for obj in objs:
self.assertIs(first, obj)
def test_instance_caching(self):
self.assertSameObject(*gen_equivalent_factors())
self.assertIs(
SomeFactor(window_length=SomeFactor.window_length + 1),
SomeFactor(window_length=SomeFactor.window_length + 1),
)
self.assertIs(
SomeFactor(dtype=int),
SomeFactor(dtype=int),
)
self.assertIs(
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
def test_instance_non_caching(self):
f = SomeFactor()
# Different window_length.
self.assertIsNot(
f,
SomeFactor(window_length=SomeFactor.window_length + 1),
)
# Different dtype
self.assertIsNot(
f,
SomeFactor(dtype=int)
)
# Reordering inputs changes semantics.
self.assertIsNot(
f,
SomeFactor(inputs=[SomeFactor.inputs[1], SomeFactor.inputs[0]]),
)
def test_instance_non_caching_redefine_class(self):
orig_foobar_instance = SomeFactorAlias()
class SomeFactor(Factor):
window_length = 5
inputs = [SomeDataSet.foo, SomeDataSet.bar]
self.assertIsNot(orig_foobar_instance, SomeFactor())
def test_instance_caching_binops(self):
f = SomeFactor()
g = SomeOtherFactor()
for lhs, rhs in product([f, g], [f, g]):
self.assertIs((lhs + rhs), (lhs + rhs))
self.assertIs((lhs - rhs), (lhs - rhs))
self.assertIs((lhs * rhs), (lhs * rhs))
self.assertIs((lhs / rhs), (lhs / rhs))
self.assertIs((lhs ** rhs), (lhs ** rhs))
self.assertIs((1 + rhs), (1 + rhs))
self.assertIs((rhs + 1), (rhs + 1))
self.assertIs((1 - rhs), (1 - rhs))
self.assertIs((rhs - 1), (rhs - 1))
self.assertIs((2 * rhs), (2 * rhs))
self.assertIs((rhs * 2), (rhs * 2))
self.assertIs((2 / rhs), (2 / rhs))
self.assertIs((rhs / 2), (rhs / 2))
self.assertIs((2 ** rhs), (2 ** rhs))
self.assertIs((rhs ** 2), (rhs ** 2))
self.assertIs((f + g) + (f + g), (f + g) + (f + g))
def test_instance_caching_unary_ops(self):
f = SomeFactor()
self.assertIs(-f, -f)
self.assertIs(--f, --f)
self.assertIs(---f, ---f)
def test_instance_caching_math_funcs(self):
f = SomeFactor()
for funcname in NUMEXPR_MATH_FUNCS:
method = getattr(f, funcname)
self.assertIs(method(), method())
def test_bad_input(self):
class SomeFactor(Factor):
pass
class SomeFactorDefaultInputs(Factor):
inputs = (SomeDataSet.foo, SomeDataSet.bar)
class SomeFactorDefaultLength(Factor):
window_length = 10
with self.assertRaises(TermInputsNotSpecified):
SomeFactor(window_length=1)
with self.assertRaises(TermInputsNotSpecified):
SomeFactorDefaultLength()
with self.assertRaises(WindowLengthNotSpecified):
SomeFactor(inputs=(SomeDataSet.foo,))
with self.assertRaises(WindowLengthNotSpecified):
SomeFactorDefaultInputs()
| ChinaQuants/zipline | tests/pipeline/test_term.py | Python | apache-2.0 | 8,036 |
import json
import logging
from community_csdt.src.models.accounts import account
from community_csdt.src.models.classes import classroom
from community_csdt.src.models.galleries import gallery
from community_csdt.src.models.login import login
from community_csdt.src.models.logout import logout
from community_csdt.src.models.pages import page
from community_csdt.src.models.projects import project
from community_csdt.src.models.recover import recover
from community_csdt.src.models.register import register
from community_csdt.src.models.upload import upload
class Root(object):
__name__ = None
__parent__ = None
def __init__(self, request):
self.request = request
def __getitem__(self, key):
log = logging.getLogger('csdt')
        log.info("Root.__getitem__()")
log.debug("key = %s" % key)
if key == "accounts":
return account.Account(self, key)
elif key == "classes":
return classroom.Classroom(self, key)
elif key == "galleries":
return gallery.Gallery(self, key)
elif key == "login":
return login.Login(self, key)
elif key == "logout":
return logout.Logout(self, key)
elif key == "pages":
return page.Page(self, key)
elif key == "projects":
return project.Project(self, key)
elif key == "recover":
return recover.Recover(self, key)
elif key == "register":
return register.Register(self, key)
elif key == "upload":
return upload.Upload(self, key)
return self
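# Note (not part of the original source): this class appears to act as a
# traversal root (Pyramid-style), so a request path segment such as
# "projects" is resolved through Root.__getitem__ into project.Project.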
| electricity345/community.csdt | src/community_csdt/community_csdt/src/models/root.py | Python | mit | 1,622 |
from distutils.core import setup
from os import path
ROOT = path.dirname(__file__)
README = path.join(ROOT, 'README.rst')
setup(
name='hurl',
py_modules=['hurl'],
url='https://github.com/oinopion/hurl',
author='Tomek Paczkowski & Aleksandra Sendecka',
author_email='tomek@hauru.eu',
version='2.1',
license='New BSD License',
long_description=open(README).read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
| oinopion/hurl | setup.py | Python | bsd-3-clause | 827 |
import dns.resolver
import random
from time import sleep
def dns_query(search_term):
try:
my_resolver = dns.resolver.Resolver()
# List of public DNS Servers:
# https://www.lifewire.com/free-and-public-dns-servers-2626062
#
my_resolver.nameservers = ['8.8.8.8', '8.8.4.4', # Google
'209.244.0.3', '209.244.0.4', # Verisign
'64.6.64.6', '64.6.65.6', # Level3
'84.200.69.80', '84.200.70.40', # DNS.WATCH
'8.26.56.26', '8.20.247.20', # Comodo Secure DNS
'208.67.222.222', '208.67.220.220'] # Open DNS
ip = random.choice(my_resolver.query(search_term))
print("[+] Resolved %s to %s " % (search_term, ip))
sleep(2)
return ip
except:
print("[-] Could not resolve %s" % search_term)
return "Not resolved" | nethunteros/punter | dns_resolve.py | Python | unlicense | 985 |
# -*- coding: utf-8 -*-
#!/usr/bin/python3
import datetime
from scipy import stats
from HipDynamics.staging import *
class Analysis:
def __init__(self, pref):
self.pref = pref
self.data = []
@property
def pref(self):
return self.__pref
@pref.setter
def pref(self, input):
if input["transformToLogScale"]:
print("[WARN] In order to perform a log transformation, all negative\n" \
" values will be inverted and 0 values set to 1e-10.")
self.__pref = input
@property
def data(self):
return self.__data
@data.setter
def data(self, input):
self.__data = input
@property
def result(self):
        return self.resultData
def runDimensionalityReduction(self):
if len(self.data) < 2:
print("[WARN] dimensionalityReduction aborted. One or no data point submitted.\n"\
" It cannot be collapsed further.")
return []
self.setMissingValuesToZero()
if self.pref["transformToLogScale"]:
self.transformDataToLogScale()
medianData = self.computeMedians()
inputData = self.collapseIntoRegressionTable(medianData)
self.resultData = self.applyLinearRegression(inputData)
return self.resultData
def setMissingValuesToZero(self):
for i in range(len(self.data)):
keys = list(self.data[i].keys())
for key in keys:
vals = self.data[i][key]
for j in range(len(vals)):
if vals[j] == "": vals[j] = 0
self.data[i][key] = vals
def transformDataToLogScale(self):
for i in range(len(self.data)):
keys = list(self.data[i].keys())
for key in keys:
vals = numpy.array(self.data[i][key]).astype(float)
vals = [abs(val) for val in vals]
for j in range(len(vals)):
if vals[j] == 0: vals[j] = 1e-10
self.data[i][key] = numpy.log(vals).tolist()
def computeMedians(self):
medianData = self.data.copy()
for i in range(len(medianData)):
keys = list(medianData[i].keys())
for key in keys:
vals = numpy.array(medianData[i][key]).astype(float)
medianData[i][key] = [numpy.median(vals).tolist()]
return medianData
def collapseIntoRegressionTable(self, data):
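        # Merge the per-measurement median dictionaries into a single
        # LookUpTable so that every feature key maps to one ordered series of
        # median values, ready for regression.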
keys = list(data[0].keys())
collapsedTable = LookUpTable()
collapsedTable.mapping = LookUpTable.generateLookUpTableMapFromList(0, keys)
for table in data:
dim = []
for key in keys:
dim.append(table[key][0])
collapsedTable.add(dim)
return collapsedTable.table
def applyLinearRegression(self, regressData):
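        # Run scipy.stats.linregress over each feature's median series
        # (x = position in the series, y = median value) and record gradient,
        # intercept, r-value, p-value and standard error per feature.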
keys = list(regressData.keys())
resultTable = LookUpTable()
resultTable.mapping = LookUpTable.generateLookUpTableMapFromList(0, keys)
result = []
for key in keys:
y = regressData[key]
x = range(len(y))
if len(x) > 1:
gradient, intercept, rValue, pValue, stdErr = stats.linregress(x, y)
result.append({
"gradient": gradient,
"intercept": intercept,
"rValue": rValue,
"pValue": pValue,
"stdErr": stdErr
})
else:
print("[WARN] The input vector for linear regression has less than 2 values. Make sure\n"\
" you selected the appropriate indexIteratorSelector. For more information\n"\
" consult the documentation.")
resultTable.add(result)
return resultTable.table
class AnalysisWrapper:
def __init__(self, lookUpTable):
if type(LookUpTable()) == type(lookUpTable):
self.table = lookUpTable
self.dataSource = self.table.sourceFeatureAccessInfo
self.columns = self.retrieveDataColumns()
self.indexGroupData = []
self.outputResultHeader = []
self.outputTable = []
else:
print("[ERROR] AnalysisWrapper only accepts tables of type {}".format(str(type(LookUpTable()))))
def retrieveDataColumns(self):
if self.dataSource != None and self.table.sourceFeaturePatternSelector != None:
if self.dataSource["type"] == "MySQL":
return self.querySelectiveColumnsFromMysql()
if self.dataSource["type"] == "CSV":
return self.querySelectiveColumnsFromCSV()
else:
print("[ERROR] AnalysisWrapper requires a table sourceFeatureAccessInfo.\n" \
" For more information, consult the documentation.")
sys.exit()
def querySelectiveColumnsFromMysql(self):
sqlInfo = self.dataSource["MySQL"]
query = self.createSelectiveColumnQueryForMySQL(sqlInfo, self.table.sourceFeaturePatternSelector)
db = pymysql.connect(sqlInfo["address"], sqlInfo["user"], sqlInfo["pwd"], sqlInfo["db"])
cursor = db.cursor()
try:
cursor.execute(query)
except:
print("[Error]: unable to fetch data from MySQL.")
else:
results = list(cursor.fetchall())
singleDresults = []
for result in results:
singleDresults.append(result[0])
return singleDresults
        finally:
            # Release the connection on success and on failure.
            db.close()
def createSelectiveColumnQueryForMySQL(self, sqlInfo, whereSelector = None):
query = "select column_name from information_schema.columns where"\
" table_name='{}' and column_name like '%{}%';".format(sqlInfo["table"],
whereSelector)
return query
def querySelectiveColumnsFromCSV(self):
csvInfo = self.dataSource["CSV"]
path = csvInfo["path"] + "/" + csvInfo["fileName"]
with codecs.open(path, "r", encoding='utf-8', errors='ignore') as inputFile:
for i in range(csvInfo["rowOffset"]):
next(inputFile)
data = csv.DictReader(inputFile, delimiter=csvInfo["delimiter"])
self.dataInMemory = []
for result in data:
self.dataInMemory.append(result)
inputFile.close()
return list(self.dataInMemory[0].keys())
def retrieveDataOfNextIndexGroup(self):
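        # Fetch the next group of row indices from the LookUpTable and pull
        # the matching rows from MySQL or the in-memory CSV data; returns an
        # empty list once all index groups have been consumed.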
idxs = self.table.nextIndexGroup()
if len(idxs) == 0:
return []
if self.dataSource["type"] == "MySQL":
self.indexGroupData = self.retrieveDataOfNextIndexGroupeFromMysql(idxs)
if self.dataSource["type"] == "CSV":
self.indexGroupData = self.retrieveDataOfNextIndexGroupeFromCSV(idxs)
print(self.formatMetadata(self.table.metadataOfRetrievedIndexGroup, len(self.indexGroupData)))
return self.indexGroupData
def retrieveDataOfNextIndexGroupeFromMysql(self, idxs):
data = []
for idxGroup in idxs:
data.append(self.queryDataFromMysql(idxGroup))
return data
def queryDataFromMysql(self, idxs):
sqlInfo = self.dataSource["MySQL"]
query = self.createDataQueryForMySQL(sqlInfo, idxs, self.columns)
db = pymysql.connect(sqlInfo["address"], sqlInfo["user"], sqlInfo["pwd"], sqlInfo["db"])
cursor = db.cursor()
try:
cursor.execute(query)
except:
print("[Error]: unable to fetch data from MySQL.")
else:
results = list(cursor.fetchall())
resultsTable = LookUpTable()
resultsTable.mapping = LookUpTable.generateLookUpTableMapFromList(0, self.columns)
for i in range(len(results)):
resultsTable.add(results[i])
return resultsTable.table
        finally:
            # Release the connection on success and on failure.
            db.close()
def createDataQueryForMySQL(self, sqlInfo, idxs, columns):
columnsConcat = ", ".join(columns)
if type(idxs) is list:
idxsConcat = ", ".join(str(idx) for idx in idxs)
else:
idxsConcat = str(idxs)
query = "select {} from {} where {} in ({});".format(columnsConcat,
sqlInfo["table"],
sqlInfo["Index"],
idxsConcat)
return query
def retrieveDataOfNextIndexGroupeFromCSV(self, idxs):
data = []
for idxGroup in idxs:
data.append(self.queryDataFromCSV(idxGroup))
return data
def queryDataFromCSV(self, idxGroup):
csvInfo = self.dataSource["CSV"]
resultTable = LookUpTable()
resultTable.mapping = LookUpTable.generateLookUpTableMapFromList(0, self.columns)
if type(idxGroup) is list:
for idx in idxGroup:
resultList = self.arrangeResultsInColumnOrder(self.dataInMemory[idx])
resultTable.add(resultList)
else:
resultList = self.arrangeResultsInColumnOrder(self.dataInMemory[idxGroup])
resultTable.add(resultList)
return resultTable.table
def arrangeResultsInColumnOrder(self, dictionary):
resultList = []
for col in self.columns:
resultList.append(dictionary[col])
return resultList
def formatMetadata(self, meta, noOfDataPoints = None):
msg = "[√] "
for m in meta:
keys = list(m.keys())
value = m[keys[0]]
msg += "{}: {} -> ".format(keys[0], value)
msg += "n = {}".format(str(noOfDataPoints))
return msg
def runAnalysis(self, analysisPreferences):
print("\nHipDynamics Analysis\n====================\n")
analysis = Analysis(analysisPreferences)
result = self.nextAnalysisRun(analysis)
while result != None:
if result != []:
row = self.formatOutputRow(result, analysisPreferences["regressionMeasures"])
self.addRowToOutputTable(row, analysisPreferences["regressionMeasures"])
result = self.nextAnalysisRun(analysis)
print("\n*** ANALYSIS SUCCESSFUL ***\n")
def nextAnalysisRun(self, analysis):
data = self.retrieveDataOfNextIndexGroup()
if len(data) == 0: return None
analysis.data = data
result = analysis.runDimensionalityReduction()
return result
def formatOutputRow(self, result, measures):
if len(self.outputResultHeader) == 0:
self.outputResultHeader = list(result.keys())
metaVal = self.formatMetadataOutputRow()
resultVal = self.formatResultOutputRow(result, measures)
return metaVal + resultVal
def formatMetadataOutputRow(self):
row = []
for d in self.table.metadataOfRetrievedIndexGroup:
keys = list(d.keys())
row.append(d[keys[0]])
return row
def formatResultOutputRow(self, result, measures):
row = []
for measure in measures:
for key in self.outputResultHeader:
row.append(result[key][0][measure])
return row
def addRowToOutputTable(self, row, measures):
if len(self.outputTable) == 0:
header = self.getHeader(measures)
self.outputTable.append(header)
self.outputTable.append(row)
def getHeader(self, measures):
meta = self.getMetadataHeader()
result = self.getResultHeader(measures)
return meta + result
def getMetadataHeader(self):
header = []
for d in self.table.metadataOfRetrievedIndexGroup:
keys = list(d.keys())
header.append("{}-{}".format("index", keys[0]))
return header
def getResultHeader(self, measures):
row = []
for measure in measures:
for key in self.outputResultHeader:
row.append("{}-{}".format(measure, key))
return row
def writeOutputToCSV(self, outputPath):
path = "{}/HipDynamics_{}.csv".format(outputPath, datetime.datetime.now().strftime('%d-%m-%Y_%H-%M'))
with open(path, 'w', newline='') as csvfile:
outputWriter = csv.writer(csvfile, delimiter=',')
for row in self.outputTable:
outputWriter.writerow(row)
def __str__(self):
pass
| KHP-Informatics/sleepsight-analytics | HipDynamics/analysis.py | Python | apache-2.0 | 12,581 |
# -*- coding: utf-8 -*-
import logging
from speaklater import make_lazy_string
from quokka.modules.accounts.models import User
logger = logging.getLogger()
def lazy_str_setting(key, default=None):
from flask import current_app
return make_lazy_string(
lambda: current_app.config.get(key, default)
)
def get_current_user():
from flask.ext.security import current_user
try:
if not current_user.is_authenticated():
return None
except RuntimeError:
# Flask-Testing will fail
pass
try:
return User.objects.get(id=current_user.id)
except Exception as e:
logger.warning("No user found: %s" % e.message)
return None
| maurobaraldi/quokka | quokka/utils/__init__.py | Python | mit | 714 |
""" Module: IDL:omg.org/CosEventComm:1.0
Automagically generated by:-
The ORB called Fnorb v1.1.Return.of.Fnorb
"""
_FNORB_ID = "IDL:omg.org/CosEventComm:1.0"
# Fnorb modules.
import Fnorb.orb.CORBA
import Fnorb.orb.TypeManager
import Fnorb.orb.Util
class Disconnected(Fnorb.orb.CORBA.UserException):
""" Exception: IDL:omg.org/CosEventComm/Disconnected:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/Disconnected:1.0"
def __init__(self):
""" Constructor. """
return
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/Disconnected:1.0", "00000000000000160000004C000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F446973636F6E6E65637465643A312E300000000000000D446973636F6E6E65637465640000000000000000", Disconnected)
class PushConsumer(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PushConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PushConsumer:1.0"
def push(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushConsumer/push:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("push", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def disconnect_push_consumer(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushConsumer/disconnect_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_push_consumer", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PushConsumer:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50757368436F6E73756D65723A312E300000000000000D50757368436F6E73756D657200", PushConsumer)
class PushSupplier(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PushSupplier:1.0"
def disconnect_push_supplier(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PushSupplier/disconnect_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_push_supplier", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PushSupplier:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50757368537570706C6965723A312E300000000000000D50757368537570706C69657200", PushSupplier)
class PullSupplier(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PullSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PullSupplier:1.0"
def pull(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/pull:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("pull", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def try_pull(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/try_pull:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_any)
outputs.append(Fnorb.orb.CORBA.TC_boolean)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventComm/Disconnected:1.0"))
# Create a request object.
request = self._create_request("try_pull", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
def disconnect_pull_supplier(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullSupplier/disconnect_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_pull_supplier", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PullSupplier:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50756C6C537570706C6965723A312E300000000000000D50756C6C537570706C69657200", PullSupplier)
class PullConsumer(Fnorb.orb.CORBA.Object):
""" Interface: IDL:omg.org/CosEventComm/PullConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosEventComm/PullConsumer:1.0"
def disconnect_pull_consumer(self, *args, **kw):
""" Operation: IDL:omg.org/CosEventComm/PullConsumer/disconnect_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Create a request object.
request = self._create_request("disconnect_pull_consumer", inputs, outputs, exceptions)
# Make the request!
apply(request.invoke, args, kw)
# Return the results.
return request.results()
Fnorb.orb.TypeManager.TypeManager_init().add_type("IDL:omg.org/CosEventComm/PullConsumer:1.0", "000000000000000E00000045000000000000002A49444C3A6F6D672E6F72672F436F734576656E74436F6D6D2F50756C6C436F6E73756D65723A312E300000000000000D50756C6C436F6E73756D657200", PullConsumer)
#############################################################################
| mguijarr/hapPyTango | src/hapPyTango/CosEventComm/__init__.py | Python | mit | 7,461 |
#Normalize genotype and combine it with fake phenotype
import csv
import scipy as SP
import pdb
import os
import lmm_lasso
def normalize(l):
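    # Encode the most frequent base in this genotype row as the reference
    # allele (0.0) and every other base as the alternative allele (1.0),
    # then return the z-score standardised vector.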
count={'A':0,'T':0,'G':0,'C':0}
for c in l:
count[c]+=1
dft=max(count,key=count.get)
r=[]
for c in l:
if c==dft:
r.append(0.0)
else:
r.append(1.0)
arr=SP.array(r)
return (arr-arr.mean())/arr.std()
if __name__ == "__main__":
# load genotypes
X = SP.genfromtxt('32.csv',delimiter=',',dtype=None)
# load leaf number phenotype
X1 = SP.genfromtxt('ln10.tsv', delimiter='\t',dtype=None)
pheno=(X[1]).tolist()
for c in range(2,len(pheno)):
for r in X1:
if int(pheno[c])==int(r[0]):
pheno[c]=r[1]
if int(pheno[c])>1000:
pheno[c]=0
#normalize and output phenotype
Y=[pheno[2:]]
for i in range(2,X.shape[0]):
Y.append(normalize(X[i][2:]))
with open("genotype32.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(Y[1:])
nf=len(Y)-1
print nf
#obtain genotype & phenotype for samples with complete phenotype
MY=SP.array(Y).transpose()
RMY=MY[MY[:,0]>0]
RY=RMY[:,0]
RY=(RY-RY.mean())/RY.std()
RX=RMY[:,1:]
#train null model for these samples
COR=1./nf*SP.dot(RX,RX.transpose())
res=lmm_lasso.train_nullmodel(RY,COR)
delta=SP.exp(res[2])
print delta
#get fake phenotype
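    # Draw a synthetic phenotype from N(0, K + delta*I), where K is the
    # sample-by-sample genotype covariance matrix over all samples and delta
    # is the noise ratio estimated by the null model above, then standardise.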
FX=MY[:,1:]
FCOR=1./nf*SP.dot(FX,FX.transpose())
D=SP.diag(SP.array([delta]*len(Y[1])))
FY=SP.random.multivariate_normal(SP.array([0]*len(Y[1])),SP.add(FCOR,D))
FY=(FY-FY.mean())/FY.std()
FY=SP.array([FY])
with open("phenotype32.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(FY.transpose())
#validate fake phenotype, that it has similar delta as we start with
res=lmm_lasso.train_nullmodel(FY.transpose(),FCOR)
delta=SP.exp(res[2])
print delta
| wuchenxi/lmm_group_lasso | data/normalize_step2.py | Python | gpl-3.0 | 2,000 |
import math
from flask import abort
from mongoengine import QuerySet
from models.forum_model import Category
def forum_template_data(forum):
categories = Category.objects(forum=forum)
return dict(forum_categories=categories)
class Pagination(object):
def __init__(self, iterable, page, per_page):
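        # Slice the iterable down to the requested page and raise a 404 for
        # page numbers below 1 or for empty pages other than the first.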
if page < 1:
abort(404)
self.iterable = iterable
self.page = page
self.per_page = per_page
if isinstance(iterable, QuerySet):
self.total = iterable.count()
else:
self.total = len(iterable)
start_index = (page - 1) * per_page
end_index = page * per_page
self.items = iterable[start_index:end_index]
if not self.items and page != 1:
abort(404)
@property
def pages(self):
"""The total number of pages"""
return int(math.ceil(self.total / float(self.per_page)))
@property
def prev_num(self):
"""Number of the previous page."""
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
return self.page + 1 | JunctionAt/JunctionWWW | blueprints/forum/forum_util.py | Python | agpl-3.0 | 1,379 |
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^api/', include('entry.urls')),
url(r'^admin/', admin.site.urls),
]
| synasius/playground-react | reaggle/backend/toggle/urls.py | Python | mit | 174 |
import math
class Solution:
# @param num, a list of integer
# @return an integer
def maximumGap(self, num):
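        # Pigeonhole/bucket approach: with n values spanning [min, max], the
        # largest gap is at least (max - min) / (n - 1), so values are dropped
        # into buckets of roughly that width and the answer can only occur
        # between the max of one non-empty bucket and the min of the next.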
if len(num) < 2 or min(num) == max(num):
return 0
a = min(num)
b = max(num)
size = max(1, int(math.ceil((b - a) / (len(num)-1))))
bucket = [[None, None] for i in range((b-a)/size + 1)]
for n in num:
bucket_index = (n - a) / size
if bucket[bucket_index][0] is None:
bucket[bucket_index][0] = n
bucket[bucket_index][1] = n
else:
bucket[bucket_index][0] = min(bucket[bucket_index][0], n)
bucket[bucket_index][1] = max(bucket[bucket_index][1], n)
bucket = [bu for bu in bucket if bu[0] is not None]
print bucket
return max(bucket[i][0] - bucket[i-1][1] for i in range(1, len(bucket)))
if __name__ == "__main__":
s = Solution()
tests = [(3, 4, 1), (1,1,1,1,1,5,5,5,5,5)]
for test in tests:
print s.maximumGap(test)
| Crayzero/LeetCodeProgramming | Solutions/Maximum Gap/MaximumGap.py | Python | mit | 1,037 |
#!/usr/bin/python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
module1 = Extension("openhmd",
["pyopenhmd.pyx", "OpenHMD.cpp"],
language="c++",
libraries=["openhmd"],
include_dirs=['/usr/include/openhmd'])
setup(name = 'openhmd',
version = '1.0',
description = 'Python OpenHMD Wrapper',
ext_modules=[module1],
cmdclass = {'build_ext': build_ext})
| lubosz/python-rift | setup.py | Python | gpl-3.0 | 454 |
# coding: utf-8
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
r = mlab.csv2rec("avg_cpu_100.csv",names=["x","y"])
plt.plot(r["x"],r["y"],label="cpu")
r = mlab.csv2rec("avg_gpu_100.csv",names=["x","y"])
plt.plot(r["x"],r["y"],label="gpu")
plt.legend()
plt.xlabel("Grid size")
plt.ylabel("Run time")
plt.title("Performance of the CPU and GPU average functions over grid size.")
plt.show()
| dorchard/ypnos | benchmarks/plot.py | Python | bsd-2-clause | 396 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ResourceMetricDefinitionPaged(Paged):
"""
A paging container for iterating over a list of ResourceMetricDefinition object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ResourceMetricDefinition]'}
}
def __init__(self, *args, **kwargs):
super(ResourceMetricDefinitionPaged, self).__init__(*args, **kwargs)
| v-iam/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/resource_metric_definition_paged.py | Python | mit | 938 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# Original module for stock.move from:
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
from openerp import netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class product_product(osv.osv):
_inherit = 'product.product'
_order = 'default_code, name'
# Override sale_stock procurement procedure!
def need_procurement(self, cr, uid, ids, context=None):
return False
class StockPicking(orm.Model):
''' Add alternative method for picking creation
'''
_inherit = 'stock.picking'
# Relink button:
def link_sale_id(self, cr, uid, ids, context=None):
''' Link sale_id depend on origin fields (multi ids)
'''
order_pool = self.pool.get('sale.order')
for picking in self.browse(cr, uid, ids, context=context):
origin = picking.origin
sale_ids = order_pool.search(cr, uid, [
('name', '=', origin)], context=context)
if sale_ids and len(sale_ids) == 1:
self.write(cr, uid, [picking.id], {
'sale_id': sale_ids[0],
}, context=context)
return True
# DDT button:
def open_ddt_report(self, cr, uid, ids, context=None):
''' Open DDT form if present
'''
assert len(ids) == 1, 'Only one picking!'
pick_proxy = self.browse(cr, uid, ids, context=context)[0]
if not pick_proxy.ddt_id:
return {} # raise error (never pass here!)
ctx = context.copy()
ctx.update({
#'active_model': 'stock.picking',
#'params': {},
#'search_disable_custom_filters': True,
'active_ids': [pick_proxy.ddt_id.id],
'active_id': pick_proxy.ddt_id.id,
})
return {
'type': 'ir.actions.report.xml',
'report_name': 'custom_ddt_report',
#'datas': datas,
'context': ctx,
}
#return self.pool.get('stock.ddt').open_ddt_report(
# cr, uid, [pick_proxy.ddt_id.id], context=context)
def open_ddt_form(self, cr, uid, ids, context=None):
''' Open DDT report directly if present
'''
assert len(ids) == 1, 'Only one picking!'
pick_proxy = self.browse(cr, uid, ids, context=context)[0]
if not pick_proxy.ddt_id:
return {} # TODO error?
return {
'name': 'DdT',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'stock.ddt',
'res_id': pick_proxy.ddt_id.id,
'view_id': False,
#'views': [(form_id, 'form'), (tree_id, 'tree')],
'type': 'ir.actions.act_window',
}
# Invoice button:
def open_invoice_report(self, cr, uid, ids, context=None):
''' Open DDT form if present
'''
assert len(ids) == 1, 'Only one picking!'
pick_proxy = self.browse(cr, uid, ids, context=context)[0]
if not pick_proxy.invoice_id:
return {} # raise error (never pass here!)
ctx = context.copy()
ctx.update({
'active_ids': [pick_proxy.invoice_id.id],
'active_id': pick_proxy.invoice_id.id,
})
return {
'type': 'ir.actions.report.xml',
'report_name': 'custom_mx_invoice_report',
'context': ctx,
}
def open_invoice_form(self, cr, uid, ids, context=None):
''' Open DDT report directly if present
'''
assert len(ids) == 1, 'Only one picking!'
# TODO view: account.invoice_form
pick_proxy = self.browse(cr, uid, ids, context=context)[0]
if not pick_proxy.invoice_id:
return {} # TODO error?
return {
'name': 'DdT',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'account.invoice',
'res_id': pick_proxy.invoice_id.id,
'view_id': False,
#'views': [(form_id, 'form'), (tree_id, 'tree')],
'type': 'ir.actions.act_window',
}
_columns = {
'sale_id': fields.many2one('sale.order', 'Sale order'),
}
class ResPartner(orm.Model):
''' Extra field for partner
'''
_inherit = 'res.partner'
_columns = {
'incoterm_id':fields.many2one(
'stock.incoterms', 'Default incoterm', ondelete='set null'),
}
class SaleOrder(orm.Model):
''' Extra field for order
'''
_inherit = 'sale.order'
# -------------------------------------------------------------------------
# Button events:
# -------------------------------------------------------------------------
def button_force_all_deadline_date(self, cr, uid, ids, context=None):
''' Force sale order date on all lines
'''
order_proxy = self.browse(cr, uid, ids, context=context)[0]
line_ids = [line.id for line in order_proxy.order_line]
self.pool.get('sale.order.line').write(cr, uid, line_ids, {
'date_deadline': order_proxy.date_deadline,
}, context=context)
return True
# -------------------------------------------------------------------------
# Override:
# -------------------------------------------------------------------------
# onchange:
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
''' Override standard procedure for add extra account field:
'''
# Call original procedure:
res = super(SaleOrder, self).onchange_partner_id(
cr, uid, ids, partner_id, context=context)
if 'value' not in res:
res['value'] = {}
# Append extra value:
if not partner_id: # reset:
res['value'].update({
'incoterm': False,
'carrier_id': False,
'carriage_condition_id': False,
'goods_description_id': False,
'transportation_reason_id': False,
# TODO remove:
'payment_term_id': False,
'payment_term': False,
'bank_account_id': False,
})
return res
partner_pool = self.pool.get('res.partner')
partner_proxy = partner_pool.browse(cr, uid, partner_id,
context=context)
res['value'].update({
'incoterm': partner_proxy.incoterm_id.id,
'carrier_id': partner_proxy.default_carrier_id.id,
'carriage_condition_id': partner_proxy.carriage_condition_id.id,
'goods_description_id': partner_proxy.goods_description_id.id,
'transportation_reason_id':
partner_proxy.transportation_reason_id.id,
# TODO remove:
'payment_term_id': partner_proxy.property_payment_term.id,
'payment_term': partner_proxy.property_payment_term.id,
})
# Set default account for partner
if partner_proxy.bank_ids:
res['value']['bank_account_id'] = partner_proxy.bank_ids[0].id
return res
_columns = {
# QUOTATION:
'date_valid': fields.date('Validity date',
help='Max date for validity of offer'),
# ORDER:
'date_confirm': fields.date('Date confirm',
help='Order confirm by the customer'), # TODO yet present in order?
'date_deadline': fields.date('Order deadline',
help='Delivery term for customer'),
# Fixed by delivery team:
'date_booked': fields.date('Booked date',
help='Delivery was booked and fixed!'),
'date_booked_confirmed': fields.boolean('Booked confirmed',
help='Booked confirmed for this date'),
'date_delivery': fields.date('Load / Availability',
help='For ex works is availability date, other clause is '
'load date'),
'date_delivery_confirmed': fields.boolean('Delivery confirmed',
help='Delivery confirmed, product available '
'(2 cases depend on incoterms)'),
# TODO used?
#'date_previous_deadline': fields.date(
# 'Previous deadline',
# help="If during sync deadline is modified this field contain old "
# "value before update"),
# TODO remove:
# Replaced with date_booked!!!
#'date_delivery': fields.date('Delivery',
# help='Contain delivery date, when present production plan work '
# 'with this instead of deadline value, if forced production '
# 'cannot be moved'),
# Account extra field saved in sale.order:
'default_carrier_id': fields.many2one('delivery.carrier', 'Carrier',
domain=[('is_vector', '=', True)]),
'carriage_condition_id': fields.many2one(
'stock.picking.carriage_condition', 'Carriage condition'),
'goods_description_id': fields.many2one(
'stock.picking.goods_description', 'Goods description'),
'transportation_reason_id': fields.many2one(
'stock.picking.transportation_reason', 'Transportation reason'),
# TODO remove:
'payment_term_id': fields.many2one(
'account.payment.term', 'Payment term'),
'bank_account_id': fields.many2one(
'res.partner.bank', 'Partner bank account'),
'bank_account_company_id': fields.many2one(
'res.partner.bank', 'Company bank account'),
# Alert:
'uncovered_payment': fields.boolean('Uncovered payment'),
'uncovered_alert': fields.char('Alert', size=64, readonly=True),
# TODO not used picking_ids!!!
'stock_picking_ids': fields.one2many(
'stock.picking', 'sale_id', 'Delivery'),
}
_defaults = {
'uncovered_alert': lambda *x: 'Alert: Uncovered payment!!!',
'date_valid': lambda *x: (
datetime.now() + timedelta(days=15)).strftime(
DEFAULT_SERVER_DATE_FORMAT),
}
class SaleOrderLine(orm.Model):
''' Extra field for order line
'''
_inherit = 'sale.order.line'
# ----------------
# Function fields:
# ----------------
def _function_get_delivered(self, cr, uid, ids, fields, args,
context=None):
''' Fields function for calculate delivered elements in picking orders
'''
res = {}
move_pool = self.pool.get('stock.move')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0.0
# Problems!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
move_ids = move_pool.search(cr, uid, [
('sale_line_id', '=', line.id),
], context=context)
for move in move_pool.browse(cr, uid, move_ids, context=context):
# TODO jump picking for virtual count!!!!!!!!!!!!!!!!!!!!!!!!!!
#if move.picking_id.ddt_number: # was marked as DDT
# TODO check UOM!!! for
res[line.id] += move.product_uos_qty
#print line.product_id.default_code, move.product_uos_qty,
#move.picking_id.name, move.picking_id.invoice_state,
#move.picking_id.invoice_id.number
return res
_columns = {
'gr_weight': fields.float('Gross weight'),
'colls': fields.integer('Colls'),
#states={'draft': [('readonly', False)]}),
'date_deadline': fields.date('Deadline'),
'date_delivery': fields.related( # TODO use booked!!!!
'order_id', 'date_delivery', type='date', string='Date delivery'),
'alias_id':fields.many2one(
'product.product', 'Marked as product', ondelete='set null'),
'delivered_qty': fields.function(
_function_get_delivered, method=True, type='float', readonly=True,
string='Delivered', store=False,
help='Quantity delivered with DDT out'),
# TODO add to delivery qty
# Order ref:
'client_order_ref': fields.related('order_id',
'client_order_ref', type='char', string='Client order ref.'),
}
_defaults = {
'colls': lambda *x: 1,
}
class StockMove(orm.Model):
''' Extra field for order line
'''
_inherit = 'stock.move'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', 'Sale line'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-mx8 | mx_sale/model/sale.py | Python | agpl-3.0 | 14,704 |
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import hildon
import gpodder
_ = gpodder.gettext
from gpodder import util
from gpodder.gtkui.interface.common import BuilderWidget
from gpodder.gtkui.model import EpisodeListModel
from gpodder.gtkui.model import PodcastChannelProxy
from gpodder.gtkui.frmntl.episodeactions import gPodderEpisodeActions
class gPodderEpisodes(BuilderWidget):
def new(self):
self.channel = None
self.episode_actions = gPodderEpisodeActions(self.main_window, \
episode_list_status_changed=self.episode_list_status_changed, \
episode_is_downloading=self.episode_is_downloading, \
show_episode_shownotes=self.show_episode_shownotes, \
playback_episodes=self.playback_episodes, \
download_episode_list=self.download_episode_list, \
show_episode_in_download_manager=self.show_episode_in_download_manager, \
add_download_task_monitor=self.add_download_task_monitor, \
remove_download_task_monitor=self.remove_download_task_monitor, \
for_each_episode_set_task_status=self.for_each_episode_set_task_status, \
delete_episode_list=self.delete_episode_list)
# Tap-and-hold (aka "long press") context menu
self.touched_episode = None
self.context_menu = gtk.Menu()
# "Emulate" hildon_gtk_menu_new
self.context_menu.set_name('hildon-context-sensitive-menu')
self.context_menu.append(self.action_shownotes.create_menu_item())
self.context_menu.append(self.action_download.create_menu_item())
self.context_menu.append(self.action_delete.create_menu_item())
self.context_menu.append(gtk.SeparatorMenuItem())
self.context_menu.append(self.action_keep.create_menu_item())
self.context_menu.append(self.action_mark_as_old.create_menu_item())
self.context_menu.show_all()
self.treeview.tap_and_hold_setup(self.context_menu)
# Workaround for Maemo bug XXX
self.button_search_episodes_clear.set_name('HildonButton-thumb')
appmenu = hildon.AppMenu()
for action in (self.action_rename, \
self.action_play_m3u, \
self.action_login, \
self.action_unsubscribe, \
self.action_update, \
self.action_check_for_new_episodes, \
self.action_delete_episodes):
button = gtk.Button()
action.connect_proxy(button)
appmenu.append(button)
for filter in (self.item_view_episodes_all, \
self.item_view_episodes_undeleted, \
self.item_view_episodes_downloaded):
button = gtk.ToggleButton()
filter.connect_proxy(button)
appmenu.add_filter(button)
appmenu.show_all()
self.main_window.set_app_menu(appmenu)
def on_rename_button_clicked(self, widget):
if self.channel is None:
return
new_title = self.show_text_edit_dialog(_('Rename podcast'), \
_('New name:'), self.channel.title)
if new_title is not None and new_title != self.channel.title:
self.channel.set_custom_title(new_title)
self.main_window.set_title(self.channel.title)
self.channel.save()
self.show_message(_('Podcast renamed: %s') % new_title)
self.update_podcast_list_model(urls=[self.channel.url])
def on_login_button_clicked(self, widget):
accept, auth_data = self.show_login_dialog(_('Login to %s') % \
self.channel.title, '', \
self.channel.username, \
self.channel.password)
if accept:
self.channel.username, self.channel.password = auth_data
self.channel.save()
def on_play_m3u_button_clicked(self, widget):
if self.channel is not None:
util.gui_open(self.channel.get_playlist_filename())
def on_website_button_clicked(self, widget):
if self.channel is not None:
util.open_website(self.channel.link)
def on_update_button_clicked(self, widget):
self.on_itemUpdateChannel_activate()
def on_unsubscribe_button_clicked(self, widget):
self.on_delete_event(widget, None)
self.on_itemRemoveChannel_activate(widget)
def on_episode_selected(self, treeview, path, column):
model = treeview.get_model()
episode = model.get_value(model.get_iter(path), \
EpisodeListModel.C_EPISODE)
self.episode_actions.show_episode(episode)
def on_delete_event(self, widget, event):
self.main_window.hide()
self.channel = None
self.hide_episode_search()
return True
def on_treeview_button_press(self, widget, event):
result = self.treeview.get_path_at_pos(int(event.x), int(event.y))
if result is not None:
path, column, x, y = result
model = self.treeview.get_model()
episode = model.get_value(model.get_iter(path), \
EpisodeListModel.C_EPISODE)
if episode.was_downloaded():
self.action_delete.set_property('visible', not episode.is_locked)
self.action_keep.set_property('visible', True)
self.action_download.set_property('visible', not episode.was_downloaded(and_exists=True))
else:
self.action_delete.set_property('visible', False)
self.action_keep.set_property('visible', False)
self.action_download.set_property('visible', not self.episode_is_downloading(episode))
self.touched_episode = None
self.action_keep.set_active(episode.is_locked)
self.action_mark_as_old.set_active(not episode.is_played)
self.touched_episode = episode
else:
self.touched_episode = None
def on_shownotes_button_clicked(self, widget):
if self.touched_episode is not None:
self.show_episode_shownotes(self.touched_episode)
def on_download_button_clicked(self, widget):
if self.touched_episode is not None:
self.show_message(_('Downloading episode'))
self.download_episode_list([self.touched_episode])
def on_delete_button_clicked(self, widget):
if self.touched_episode is not None:
self.delete_episode_list([self.touched_episode])
def on_keep_button_clicked(self, widget):
if self.touched_episode is not None:
self.touched_episode.mark(is_locked=not self.touched_episode.is_locked)
self.episode_list_status_changed([self.touched_episode])
def on_mark_as_old_button_clicked(self, widget):
if self.touched_episode is not None:
self.touched_episode.mark(is_played=not self.touched_episode.is_played)
self.episode_list_status_changed([self.touched_episode])
def on_check_for_new_episodes_button_clicked(self, widget):
self.show_message(_('Checking for new episodes...'))
self.on_itemUpdate_activate(widget)
def show(self):
# Check if we are displaying the "all episodes" view
all_episodes = isinstance(self.channel, PodcastChannelProxy)
for action in (self.action_rename, \
self.action_play_m3u, \
self.action_login, \
self.action_unsubscribe, \
self.action_update):
action.set_visible(not all_episodes)
for action in (self.action_check_for_new_episodes, \
self.action_delete_episodes):
action.set_visible(all_episodes)
self.main_window.set_title(self.channel.title)
self.main_window.show()
self.treeview.grab_focus()
| christofdamian/gpodder | src/gpodder/gtkui/frmntl/episodes.py | Python | gpl-3.0 | 8,787 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a specialized line edit for entering IRC messages.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import Qt, qVersion
from E5Gui.E5LineEdit import E5LineEdit, E5ClearableLineEdit
class IrcMessageEdit(E5ClearableLineEdit):
"""
Class implementing a specialized line edit for entering IRC messages.
"""
MaxHistory = 100
def __init__(self, parent=None, inactiveText="",
side=E5LineEdit.RightSide):
"""
Constructor
@param parent reference to the parent widget (QWidget)
@keyparam inactiveText text to be shown on inactivity (string)
@keyparam side side the clear button should be shown at
(E5LineEdit.RightSide, E5LineEdit.LeftSide)
"""
super(IrcMessageEdit, self).__init__(parent, inactiveText, side)
self.__historyList = [""] # initialize with one empty line
self.__historyLine = 0
def setText(self, text):
"""
Public method to set the text.
Note: This reimplementation ensures, that the cursor is at the end of
the text.
@param text text to be set (string)
"""
super(IrcMessageEdit, self).setText(text)
self.setCursorPosition(len(text))
def keyPressEvent(self, evt):
"""
Protected method implementing special key handling.
@param evt reference to the event (QKeyEvent)
"""
key = evt.key()
if key == Qt.Key_Up:
self.__getHistory(True)
return
elif key == Qt.Key_Down:
self.__getHistory(False)
return
elif key in [Qt.Key_Return, Qt.Key_Enter]:
if self.text():
self.__addHistory(self.text())
elif evt.text() == chr(21):
# ^U: clear the text
self.setText("")
super(IrcMessageEdit, self).keyPressEvent(evt)
def wheelEvent(self, evt):
"""
Protected slot to support wheel events.
@param evt reference to the wheel event (QWheelEvent)
"""
if qVersion() >= "5.0.0":
delta = evt.angleDelta().y()
else:
delta = evt.delta()
if delta > 0:
self.__getHistory(True)
elif delta < 0:
self.__getHistory(False)
super(IrcMessageEdit, self).wheelEvent(evt)
def __addHistory(self, txt):
"""
Private method to add an entry to the history.
@param txt text to be added to the history (string)
"""
# Only add the entry, if it is not the same as last time
if len(self.__historyList) == 1 or \
(len(self.__historyList) > 1 and self.__historyList[1] != txt):
# Replace empty first entry and add new empty first entry
self.__historyList[0] = txt
self.__historyList.insert(0, "")
# Keep history below the defined limit
del self.__historyList[IrcMessageEdit.MaxHistory:]
self.__historyLine = 0
def __getHistory(self, up):
"""
Private method to move in the history.
@param up flag indicating the direction (boolean)
"""
# preserve the current text, if it is not empty
if self.text():
self.__historyList[self.__historyLine] = self.text()
if up:
self.__historyLine += 1
# If the position was moved past the end of the history,
# go to the last entry
if self.__historyLine == len(self.__historyList):
self.__historyLine -= 1
return
else:
# If the position is at the top of the history, arrow-down shall
# add the text to the history and clear the line edit for new input
if self.__historyLine == 0:
if self.text():
self.__addHistory(self.text())
self.setText("")
else:
# If the position is not at the top of the history,
# decrement it
self.__historyLine -= 1
# replace the text of the line edit with the selected history entry
self.setText(self.__historyList[self.__historyLine])
| davy39/eric | Network/IRC/IrcMessageEdit.py | Python | gpl-3.0 | 4,486 |
from __future__ import absolute_import
import os
import django
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'music.settings')
django.setup() # Required for django 1.7+
app = Celery('tasks')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) | kburts/drf-music | Backend/music/celery.py | Python | mit | 359 |
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import daily_manpower
| yvaucher/hr | __unported__/hr_report_manpower/wizard/__init__.py | Python | agpl-3.0 | 845 |
__VERSION__ = '0.5.2'
| sykora/ReSTinPeace | rip/__init__.py | Python | gpl-3.0 | 22 |