| repo_name (string, 5 to 92 chars) | path (string, 4 to 232 chars) | copies (string, 19 classes) | size (string, 4 to 7 chars) | content (string, 721 to 1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51 to 99.9) | line_max (int64, 15 to 997) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
shreyshahi/ghofAtkinson | ghofAtkinson/model.py | 1 | 2759 | import numpy as np
PERIODS = np.array([0.01, 0.07, 0.09, 0.11, 0.14, 0.18, 0.22, 0.27, 0.34, 0.42, 0.53, 0.65, 0.81, 1.01, 1.25, 1.56, 1.92, 2.44, 3.03, 3.7, 4.55, 5.88, 7.14, 9.09])
c1 = [-0.00219, -0.00236, -0.00244, -0.00245, -0.0024, -0.00235, -0.00235, -0.00233, -0.00231, -0.00224, -0.00213, -0.002, -0.00183, -0.00158, -0.00133, -0.00112, -0.00086, -0.00059, -0.00039, -0.00023, -0.00005, 0, 0, 0]
c2 = [-0.00298, -0.00329, -0.00346, -0.00356, -0.00357, -0.00358, -0.00355, -0.00346, -0.00333, -0.00315, -0.0029, -0.00262, -0.00234, -0.00205, -0.00177, -0.00152, -0.00125, -0.00097, -0.00075, -0.00057, -0.0004, -0.00027, -0.00019, -0.00019]
c3 = [-0.219, -0.046, 0.027, 0.01, -0.082, -0.18, -0.289, -0.386, -0.438, -0.52, -0.606, -0.672, -0.705, -0.69, -0.646, -0.578, -0.518, -0.513, -0.554, -0.574, -0.561, -0.491, -0.462, -0.413]
a = [2.8193, 3.1807, 3.3592, 3.4483, 3.5005, 3.4463, 3.3178, 3.2008, 3.0371, 2.7958, 2.5332, 2.3234, 2.1321, 1.9852, 1.8442, 1.6301, 1.4124, 1.1154, 0.7965, 0.5093, 0.2578, -0.1469, -0.5012, -1.093]
b = [0.1908, 0.1759, 0.17, 0.1669, 0.1604, 0.165, 0.1763, 0.1839, 0.197, 0.2154, 0.2331, 0.2435, 0.2522, 0.2561, 0.2599, 0.273, 0.2851, 0.3015, 0.3197, 0.3361, 0.3497, 0.3835, 0.4119, 0.4641]
Dcascadia = [-0.301, -0.357, -0.357, -0.319, -0.272, -0.237, -0.183, -0.114, -0.046, 0.002, 0.007, 0.011, 0.014, 0.021, 0.089, 0.139, 0.174, 0.129, 0.079, 0.044, 0.013, 0, 0, 0]
PHI = [0.284, 0.313, 0.326, 0.329, 0.324, 0.312, 0.31, 0.312, 0.307, 0.295, 0.276, 0.257, 0.249, 0.249, 0.261, 0.274, 0.285, 0.275, 0.264, 0.252, 0.237, 0.218, 0.201, 0.175]
TAU = [0.196, 0.215, 0.22, 0.218, 0.212, 0.206, 0.202, 0.199, 0.191, 0.171, 0.155, 0.147, 0.131, 0.115, 0.11, 0.113, 0.121, 0.132, 0.137, 0.138, 0.147, 0.151, 0.148, 0.155]
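# PHI and TAU are the intra-event and inter-event standard deviations used by the sigma helpers below.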
def computeSpectra(mag, r, faba, vs30, cascadia, epistemic, per):
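    # faba is a flag: F and B are complementary, selecting whether the c1 or c2 distance-attenuation term applies.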
F = 1 - faba
B = faba
pIdx = np.nonzero(PERIODS == per)[0][0]
c0 = a[pIdx] + b[pIdx] * mag
reff = np.sqrt(r**2 + 60**2)
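    # Effective distance: r combined in quadrature with a fixed 60 km term.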
logSa = c0 - np.log10(reff) + c1[pIdx] * F * r + c2[pIdx] * B * r + c3[pIdx] * np.log10(vs30/760)
if cascadia == 1:
logSa += Dcascadia[pIdx]
if epistemic == 1:
correction = np.min([0.15 + 0.0007*r , 0.35])
logSa += correction
if epistemic == -1:
correction = -1 * np.min([0.15 + 0.0007*r , 0.35])
logSa += correction
return logSa
def interEventSigma(periods):
tau = [np.interp(np.log10(per) , np.log10(PERIODS) , TAU) for per in periods]
return tau
def intraEventSigma(periods):
phi = [np.interp(np.log10(per) , np.log10(PERIODS) , PHI) for per in periods]
return phi
| mit | 1,697,952,010,962,822,000 | 58.978261 | 243 | 0.538238 | false |
rancher/cattle | tests/integration/cattletest/core/test_volume.py | 1 | 14377 | from random import choice
from string import hexdigits
from common_fixtures import * # NOQA
from gdapi import ApiError
from gdapi import ClientApiError
VOLUME_CLEANUP_LABEL = 'io.rancher.container.volume_cleanup_strategy'
def test_volume_cant_delete_active(client, context):
c = client.create_container(imageUuid=context.image_uuid)
c = client.wait_success(c)
assert c.state == 'running'
volume = c.volumes()[0]
assert volume.state == 'active'
# Assert an active volume cannot be deleted
with pytest.raises(ApiError) as e:
client.delete(volume)
assert e.value.error.status == 405
def test_volume_create_state(client, context):
name = random_str()
c = client.create_volume(name=name, driver='local')
c = client.wait_success(c)
assert c.state == 'inactive'
assert c.uri == 'local:///%s' % name
volume = client.wait_success(client.delete(c))
assert volume.removed is not None
def test_volume_create_size_validation(client, context):
with pytest.raises(ApiError) as e:
client.create_volume(name='foo', driver='foo', sizeMb=111)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
def test_volume_create_without_driver_name(client, context):
name = random_str()
with pytest.raises(ApiError) as e:
client.create_volume(name=name)
assert e.value.error.status == 422
assert e.value.error.code == 'MissingRequired'
def test_volume_create_with_opts(client, context):
name = random_str()
c = client.create_volume(name=name,
driver='local',
driverOpts={'size': '1G'})
c = client.wait_success(c)
assert c.state == 'inactive'
assert c.uri == 'local:///%s' % name
volume = client.wait_success(client.delete(c))
assert volume.removed is not None
def test_create_container_with_volume(new_context, super_client):
client = new_context.client
name1 = random_str()
v1 = client.create_volume(name=name1, driver='local')
v1 = client.wait_success(v1)
assert v1.state == 'inactive'
name2 = random_str()
v2 = client.create_volume(name=name2, driver='local')
v2 = client.wait_success(v2)
assert v2.state == 'inactive'
dataVolumeMounts = {'/var/lib/docker/mntpt1': v1.id,
'/var/lib/docker/mntpt2': v2.id}
dataVolumes = {'/home': '/home'}
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=dataVolumes,
dataVolumeMounts=dataVolumeMounts)
c = client.wait_success(c, timeout=240)
assert c.state == 'running'
dataVol1 = '%s:/var/lib/docker/mntpt1' % name1
dataVol2 = '%s:/var/lib/docker/mntpt2' % name2
dataVol1Found = False
dataVol2Found = False
for dataVol in c.dataVolumes:
if dataVol == dataVol1:
dataVol1Found = True
if dataVol == dataVol2:
dataVol2Found = True
assert dataVol1Found and dataVol2Found
# Mounting happens in docker specific code; need to simulate
create_mount(v1, c, client, super_client)
create_mount(v2, c, client, super_client)
v1 = client.wait_success(v1)
v2 = client.wait_success(v2)
assert v1.state == 'active'
assert v2.state == 'active'
# Assert an active volume cannot be deleted
with pytest.raises(ApiError) as e:
client.delete(v1)
assert e.value.error.status == 405
assert len(c.volumes()) == 1
assert c.volumes()[0].id not in [v1.id, v2.id]
vsp1 = super_client.list_volumeStoragePoolMap(volumeId=v1.id)
vsp2 = super_client.list_volumeStoragePoolMap(volumeId=v2.id)
assert vsp1 is not None and len(vsp1) == 1
assert vsp2 is not None and len(vsp2) == 1
spid1 = vsp1[0].storagePoolId
spid2 = vsp2[0].storagePoolId
host1 = super_client.list_storagePoolHostMap(storagePoolId=spid1)
host2 = super_client.list_storagePoolHostMap(storagePoolId=spid2)
assert host1[0].id == host2[0].id
new_host = register_simulated_host(new_context)
with pytest.raises(ClientApiError) as e:
c = client.create_container(imageUuid=new_context.image_uuid,
volumeDriver='local',
dataVolumes=dataVolumes,
requestedHostId=new_host.id,
dataVolumeMounts=dataVolumeMounts)
client.wait_success(c)
assert 'must have exactly these pool(s)' in e.value.message
def create_resources(context, client, super_client, labels={}):
vol = client.create_volume(name=random_str(), driver='local')
unnamed_vol = client.create_volume(name=random_vol_name(), driver='local')
data_volume_mounts = {'/con/path': vol.id, '/path2': unnamed_vol.id}
c = client.create_container(imageUuid=context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels=labels)
c = client.wait_success(c)
# Simulate volume mount (only happens with real docker)
create_mount(vol, c, client, super_client)
create_mount(unnamed_vol, c, client, super_client)
return c, vol, unnamed_vol
def test_instance_volume_cleanup_strategy(new_context, super_client):
client = new_context.client
# Assert default strategy to delete unnamed volumes only
c, vol, unnamed_vol = create_resources(new_context, client, super_client)
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert explicit 'unnamed' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'unnamed'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert 'none' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'none'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.state == 'detached')
wait_for_condition(client, unnamed_vol, lambda x: x.state == 'detached')
# Assert 'all' strategy
c, vol, unnamed_vol = create_resources(
new_context, client, super_client, labels={
VOLUME_CLEANUP_LABEL: 'all'})
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
wait_for_condition(client, vol, lambda x: x.removed is not None)
wait_for_condition(client, unnamed_vol, lambda x: x.removed is not None)
# Assert invalid value for label is rejected
with pytest.raises(ApiError):
create_resources(
new_context, client, super_client,
labels={VOLUME_CLEANUP_LABEL: 'foo'})
def create_container_and_mount(client, data_volume_mounts, new_context,
super_client, vols):
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels={VOLUME_CLEANUP_LABEL: 'all'})
c = client.wait_success(c)
for vol in vols:
c, m = create_mount(vol, c, client, super_client)
return c
def purge_instance_and_check_volume_state(c, vols, client, state=None):
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
client.wait_success(c.purge())
def check(obj):
if state is not None:
return obj.state == state
else:
return obj.removed is not None
def report(obj):
if state is not None:
return 'State: %s. Expected: %s' % (obj.state, state)
else:
return 'Removed is None'
for vol in vols:
wait_for_condition(client, vol,
lambda x: check(x),
lambda x: report(x))
def create_volume_and_dvm(client, count):
dvms = {}
vols = []
for i in range(0, count):
v1 = client.create_volume(name=random_str(), driver='local')
dvms['/con/path%s' % i] = v1.id
vols.append(v1)
return dvms, vols
def test_volume_remove_on_purge(new_context, super_client):
client = new_context.client
# Simple case: volume associated with one container that is purged
# volume gets removed
dvms, vols = create_volume_and_dvm(client, 2)
c = create_container_and_mount(client, dvms, new_context,
super_client, vols)
purge_instance_and_check_volume_state(c, vols, client)
# Vol associated with multiple containers
dvms, vols = create_volume_and_dvm(client, 2)
c = create_container_and_mount(client, dvms, new_context,
super_client, vols)
c2 = create_container_and_mount(client, dvms, new_context,
super_client, vols)
purge_instance_and_check_volume_state(c, vols, client, state='active')
purge_instance_and_check_volume_state(c2, vols, client)
def test_volume_mounting_and_delete(new_context, super_client):
client = new_context.client
v1 = client.create_volume(name=random_str(), driver='local')
data_volume_mounts = {'/con/path': v1.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
v1 = client.wait_success(v1)
assert len(v1.storagePools()) == 1
# Creating a mount that associates the volume to the container
# only happens when integrating with real docker, so we'll simulate it
c, m = create_mount(v1, c, client, super_client)
# Assert that creating the mount results in activating volume
check_mount_count(client, c, 1)
assert m.state == 'active'
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
# Assert that a volume with mounts cannot be deactivated, removed or purged
assert 'deactivate' not in v1.actions and 'remove' not in v1.actions \
and 'purge' not in v1.actions
# Assert that once the container is removed, the mounts are removed and the
    # volume is deactivated
c = client.wait_success(c.stop())
c = client.wait_success(c.remove())
v1 = wait_for_condition(client, v1, lambda x: x.state == 'detached')
check_mount_count(client, c, 0)
# Mount to new container to assert volume goes back to active
c2 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c2 = client.wait_success(c2)
c2, m2 = create_mount(v1, c2, client, super_client)
check_mount_count(client, c2, 1)
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
# Make the volume be mounted to two containers
c3 = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts,
labels={VOLUME_CLEANUP_LABEL: 'all'})
c3 = client.wait_success(c3)
c3, m3 = create_mount(v1, c3, client, super_client)
check_mount_count(client, c3, 1)
check_mount_count(client, v1, 2)
    # Remove one of the containers and assert that actions are still blocked
c2 = client.wait_success(c2.stop())
c2 = client.wait_success(c2.remove())
check_mount_count(client, c2, 0)
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
v1 = client.wait_success(v1)
check_mount_count(client, v1, 1)
assert 'deactivate' not in v1.actions and 'remove' not in v1.actions \
and 'purge' not in v1.actions
# Remove remaining container and assert the volume can be removed
c3 = client.wait_success(c3.stop())
c3 = client.wait_success(c3.remove())
check_mount_count(client, c3, 0)
wait_for_condition(client, v1, lambda x: x.removed is not None)
def test_volume_storage_pool_purge(new_context, super_client):
client = new_context.client
vol_name = random_str()
v1 = client.create_volume(name=vol_name, driver='local')
data_volume_mounts = {'/con/path': v1.id}
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumeMounts=data_volume_mounts)
c = client.wait_success(c)
assert c.state == 'running'
c, m = create_mount(v1, c, client, super_client)
check_mount_count(client, c, 1)
assert m.state == 'active'
v1 = wait_for_condition(client, v1, lambda x: x.state == 'active')
sp = v1.storagePools()[0]
host = c.hosts()[0]
host = client.wait_success(host.deactivate())
host = client.wait_success(host.remove())
client.wait_success(host.purge())
wait_for_condition(client, sp, lambda x: x.removed is not None)
wait_for_condition(client, v1, lambda x: x.removed is not None)
register_simulated_host(new_context)
c = client.create_container(imageUuid=new_context.image_uuid,
dataVolumes=['%s:/foo' % vol_name])
c = client.wait_success(c)
assert c.state == 'running'
def create_mount(volume, container, client, super_client):
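    # Simulate the mount record that real Docker integration would normally create, linking volume and container.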
mount = super_client.create_mount(volumeId=volume.id,
instanceId=container.id,
accountId=container.accountId)
mount = super_client.wait_success(mount)
return client.reload(container), mount
def check_mount_count(client, resource, count):
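    # Wait until the resource reports exactly `count` mounts that are not in the 'inactive' state.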
wait_for_condition(client, resource, lambda x: len(
[i for i in resource.mounts_link() if i.state != 'inactive']) == count)
def random_vol_name():
# Emulates the random name that docker would assign to an unnamed volume
return ''.join(choice(hexdigits) for i in range(64))
| apache-2.0 | 3,075,792,182,796,224,500 | 36.054124 | 79 | 0.634555 | false |
vishwa91/OptSys | examples/relay.py | 1 | 1530 | #!/usr/bin/env python3
import os, sys
sys.path.append('../modules')
import numpy as np
import matplotlib.pyplot as plt
import raytracing as rt
import visualize as vis
if __name__ == '__main__':
# Create a relay lens system
components = []
rays = []
image_plane = -100
# System contains two lenses
components.append(rt.Lens(f=100,
aperture=100,
pos=[0,0],
theta=0))
components.append(rt.Lens(f=100,
aperture=100,
pos=[30,0],
theta=0))
# Create three points and three rays from each point
rays.append([image_plane, 10, -np.pi/20])
rays.append([image_plane, 10, 0])
rays.append([image_plane, 10, np.pi/20])
rays.append([image_plane, 0, -np.pi/20])
rays.append([image_plane, 0, 0])
rays.append([image_plane, 0, np.pi/20])
rays.append([image_plane, -10, -np.pi/20])
rays.append([image_plane, -10, 0])
rays.append([image_plane, -10, np.pi/20])
colors = ['c', 'c', 'c', 'm', 'm', 'm', 'y', 'y', 'y']
# Propagate the rays
ray_bundles = rt.propagate_rays(components, rays)
# Create a new canvas
canvas = vis.Canvas([-200, 200], [-100, 100])
# Draw the components
canvas.draw_components(components)
# Draw the rays
canvas.draw_rays(ray_bundles, colors)
# Show the system
canvas.show()
# Save a copy
canvas.save('relay.png')
| mit | -3,374,446,642,605,414,000 | 24.5 | 58 | 0.545752 | false |
robmcmullen/peppy | peppy/major_modes/flagship.py | 1 | 1808 | # peppy Copyright (c) 2006-2009 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""FlagShip programming language editing support.
Major mode for editing FlagShip files.
Supporting actions and minor modes should go here only if they are uniquely
applicable to this major mode and can't be used in other major modes. If
actions can be used with multiple major modes, they should be put in a
separate plugin in the peppy/plugins directory.
"""
import os
import wx
import wx.stc
from peppy.lib.foldexplorer import *
from peppy.lib.autoindent import *
from peppy.yapsy.plugins import *
from peppy.major import *
from peppy.editra.style_specs import unique_keywords
from peppy.fundamental import FundamentalMode
class FlagShipMode(FundamentalMode):
"""Stub major mode for editing FlagShip files.
This major mode has been automatically generated and is a boilerplate/
placeholder major mode. Enhancements to this mode are appreciated!
"""
keyword = 'FlagShip'
editra_synonym = 'FlagShip'
stc_lexer_id = wx.stc.STC_LEX_FLAGSHIP
start_line_comment = u'//'
end_line_comment = ''
icon = 'icons/page_white.png'
default_classprefs = (
StrParam('extensions', 'prg', fullwidth=True),
StrParam('keyword_set_0', unique_keywords[128], hidden=False, fullwidth=True),
StrParam('keyword_set_1', unique_keywords[129], hidden=False, fullwidth=True),
StrParam('keyword_set_2', unique_keywords[130], hidden=False, fullwidth=True),
StrParam('keyword_set_3', unique_keywords[131], hidden=False, fullwidth=True),
)
class FlagShipModePlugin(IPeppyPlugin):
"""Plugin to register modes and user interface for FlagShip
"""
def getMajorModes(self):
yield FlagShipMode
| gpl-2.0 | -6,349,689,533,123,834,000 | 33.113208 | 86 | 0.722898 | false |
springload/wagtailmodeladmin | wagtailmodeladmin/views.py | 1 | 30988 | import sys
import operator
from collections import OrderedDict
from functools import reduce
from django.db import models
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import QUERY_TERMS
from django.shortcuts import get_object_or_404, redirect, render
from django.core.urlresolvers import reverse
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import Paginator, InvalidPage
from django.contrib.admin import FieldListFilter, widgets
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote)
from django.utils import six
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.http import urlencode
from django.utils.functional import cached_property
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from .permission_helpers import PermissionHelper, PagePermissionHelper
from .utils import get_url_name, ActionButtonHelper, permission_denied
from .forms import ParentChooserForm
# IndexView settings
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
class WMABaseView(TemplateView):
"""
Groups together common functionality for all app views.
"""
model_admin = None
meta_title = ''
page_title = ''
page_subtitle = ''
def __init__(self, model_admin):
self.model_admin = model_admin
self.model = model_admin.model
self.opts = model_admin.model._meta
self.pk_attname = self.opts.pk.attname
self.is_pagemodel = model_admin.is_pagemodel
if self.is_pagemodel:
self.permission_helper = PagePermissionHelper(self.model)
else:
self.permission_helper = PermissionHelper(self.model)
@cached_property
def app_label(self):
return capfirst(force_text(self.opts.app_label))
@cached_property
def model_name(self):
return capfirst(force_text(self.opts.verbose_name))
@cached_property
def model_name_plural(self):
return capfirst(force_text(self.opts.verbose_name_plural))
@cached_property
def get_index_url(self):
return self.model_admin.get_index_url()
@cached_property
def get_create_url(self):
return self.model_admin.get_create_url()
@cached_property
def menu_icon(self):
return self.model_admin.get_menu_icon()
@cached_property
def header_icon(self):
return self.menu_icon
def get_edit_url(self, obj):
return reverse(get_url_name(self.opts, 'edit'), args=(obj.pk,))
def get_delete_url(self, obj):
return reverse(get_url_name(self.opts, 'delete'), args=(obj.pk,))
def prime_session_for_redirection(self):
self.request.session['return_to_index_url'] = self.get_index_url
def get_page_title(self):
return self.page_title or self.model_name_plural
def get_meta_title(self):
return self.meta_title or self.get_page_title()
def get_base_queryset(self, request):
return self.model_admin.get_queryset(request)
class WMAFormView(WMABaseView, FormView):
def get_edit_handler_class(self):
panels = extract_panel_definitions_from_model_class(self.model)
return ObjectList(panels).bind_to_model(self.model)
def get_form_class(self):
return self.get_edit_handler_class().get_form_class(self.model)
def get_success_url(self):
return self.get_index_url
def get_instance(self):
return getattr(self, 'instance', None) or self.model()
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs.update({'instance': self.get_instance()})
return kwargs
def get_context_data(self, **kwargs):
instance = self.get_instance()
edit_handler_class = self.get_edit_handler_class()
form = self.get_form()
return {
'view': self,
'edit_handler': edit_handler_class(instance=instance, form=form)
}
def get_success_message(self, instance):
return _("{model_name} '{instance}' created.").format(
            model_name=self.model_name, instance=instance)
def get_success_message_buttons(self, instance):
return [
messages.button(self.get_edit_url(instance), _('Edit'))
]
def get_error_message(self):
model_name = self.model_name.lower()
return _("The %s could not be created due to errors.") % model_name
def form_valid(self, form):
instance = form.save()
messages.success(
self.request, self.get_success_message(instance),
buttons=self.get_success_message_buttons(instance)
)
return redirect(self.get_success_url())
def form_invalid(self, form):
messages.error(self.request, self.get_error_message())
return self.render_to_response(self.get_context_data())
class ObjectSpecificView(WMABaseView):
object_id = None
instance = None
def __init__(self, model_admin, object_id):
super(ObjectSpecificView, self).__init__(model_admin)
self.object_id = object_id
self.pk_safe = quote(object_id)
filter_kwargs = {}
filter_kwargs[self.pk_attname] = self.pk_safe
object_qs = model_admin.model._default_manager.get_queryset().filter(
**filter_kwargs)
self.instance = get_object_or_404(object_qs)
def check_action_permitted(self):
return True
def get_edit_url(self, obj=None):
return reverse(get_url_name(self.opts, 'edit'), args=(self.pk_safe,))
def get_delete_url(self, obj=None):
return reverse(get_url_name(self.opts, 'delete'), args=(self.pk_safe,))
class IndexView(WMABaseView):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.list_display = self.model_admin.get_list_display(request)
self.list_filter = self.model_admin.get_list_filter(request)
self.search_fields = self.model_admin.get_search_fields(request)
self.items_per_page = self.model_admin.list_per_page
self.select_related = self.model_admin.list_select_related
request = self.request
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
self.query = request.GET.get(SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
if not self.permission_helper.allow_list_view(request.user):
return permission_denied(request)
return super(IndexView, self).dispatch(request, *args, **kwargs)
def get_action_buttons_for_obj(self, user, obj):
bh = ActionButtonHelper(self.model, self.permission_helper, user, obj)
return bh.get_permitted_buttons()
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
if self.search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
def lookup_allowed(self, lookup, value):
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in self.model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field, _, _, _ = self.model._meta.get_field_by_name(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif isinstance(field, ForeignObjectRel):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise DisallowedModelAdminLookup(
"Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(
request,
lookup_params,
self.model,
self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given
# field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field = list_filter
field_list_filter_class = FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model,
field_path)[-1]
spec = field_list_filter_class(
field,
request,
lookup_params,
self.model,
self.model_admin,
field_path=field_path)
# Check if we need to use distinct()
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
return (
filter_specs, bool(filter_specs), lookup_params, use_distinct
)
except FieldDoesNotExist as e:
six.reraise(
IncorrectLookupParameters,
IncorrectLookupParameters(e),
sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
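        """
        Rebuild the current query string, merging ``new_params`` into the
        existing parameters and dropping any key that starts with a prefix
        listed in ``remove``.
        """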
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
def get_default_ordering(self, request):
if self.model_admin.get_ordering(request):
return self.model_admin.get_ordering(request)
if self.opts.ordering:
return self.opts.ordering
return ()
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.get_default_ordering(request))
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
# reverse order if order_field has already "-" as prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying
# sort field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if ORDER_VAR not in self.params:
# for ordering specified on model_admin or model Meta, we don't
# know the right column numbers absolutely, because there might be
            # more than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.get_base_queryset(request)
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
# are not in the correct type, so we might get FieldError,
# ValueError, ValidationError, or ?.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.select_related is True:
return qs.select_related()
if self.select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.select_related:
return qs.select_related(*self.select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field, models.ManyToOneRel):
return True
return False
def get_context_data(self, request, *args, **kwargs):
user = request.user
has_add_permission = self.permission_helper.has_add_permission(user)
all_count = self.get_base_queryset(request).count()
queryset = self.get_queryset(request)
result_count = queryset.count()
paginator = Paginator(queryset, self.items_per_page)
try:
page_obj = paginator.page(self.page_num + 1)
except InvalidPage:
page_obj = paginator.page(1)
context = {
'view': self,
'all_count': all_count,
'result_count': result_count,
'paginator': paginator,
'page_obj': page_obj,
'object_list': page_obj.object_list,
'has_add_permission': has_add_permission,
}
if self.is_pagemodel:
allowed_parent_types = self.model.allowed_parent_page_types()
user = request.user
valid_parents = self.permission_helper.get_valid_parent_pages(user)
valid_parent_count = valid_parents.count()
context.update({
'no_valid_parents': not valid_parent_count,
'required_parent_types': allowed_parent_types,
})
return context
def get(self, request, *args, **kwargs):
context = self.get_context_data(request, *args, **kwargs)
if request.session.get('return_to_index_url'):
del(request.session['return_to_index_url'])
return self.render_to_response(context)
def get_template_names(self):
return self.model_admin.get_index_template()
class CreateView(WMAFormView):
page_title = _('New')
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.has_add_permission(request.user):
return permission_denied(request)
if self.is_pagemodel:
user = request.user
parents = self.permission_helper.get_valid_parent_pages(user)
parent_count = parents.count()
# There's only one available parent for this page type for this
            # user, so send them straight to the page create view with that parent pre-selected
if parent_count == 1:
parent = parents.get()
return redirect(
'wagtailadmin_pages_create', self.opts.app_label,
self.opts.model_name, parent.pk)
# The page can be added in multiple places, so redirect to the
# choose_parent_page view so that the parent can be specified
return redirect(self.model_admin.get_choose_parent_page_url())
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Create new %s') % self.model_name.lower()
def get_page_subtitle(self):
return self.model_name
def get_template_names(self):
return self.model_admin.get_create_template()
class ChooseParentPageView(WMABaseView):
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.has_add_permission(request.user):
return permission_denied(request)
return super(ChooseParentPageView, self).dispatch(request, *args,
**kwargs)
def get_page_title(self):
return _('Add %s') % self.model_name
def get_form(self, request):
parents = self.permission_helper.get_valid_parent_pages(request.user)
return ParentChooserForm(parents, request.POST or None)
def get(self, request, *args, **kwargs):
form = self.get_form(request)
if form.is_valid():
parent = form.cleaned_data['parent_page']
return redirect('wagtailadmin_pages_create', self.opts.app_label,
self.opts.model_name, quote(parent.pk))
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
def get_template(self):
return self.model_admin.get_choose_parent_page_template()
class EditView(ObjectSpecificView, CreateView):
page_title = _('Editing')
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_edit_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
if self.is_pagemodel:
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_edit', self.object_id)
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Editing %s') % self.model_name.lower()
def page_subtitle(self):
return self.instance
def get_success_message(self, instance):
return _("{model_name} '{instance}' updated.").format(
model_name=self.model_name, instance=instance)
def get_error_message(self):
model_name = self.model_name.lower()
return _("The %s could not be saved due to errors.") % model_name
def get_template_names(self):
return self.model_admin.get_edit_template()
class DeleteView(ObjectSpecificView):
page_title = _('Delete')
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_delete_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
if self.is_pagemodel:
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_delete', self.object_id)
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Confirm deletion of %s') % self.model_name.lower()
def get_page_subtitle(self):
return self.instance
def confirmation_message(self):
        return _(
            "Are you sure you want to delete this %s? If other things in your "
            "site are related to it, they may also be affected."
) % self.model_name
def get(self, request, *args, **kwargs):
instance = self.instance
if request.POST:
instance.delete()
messages.success(
request,
_("{model_name} '{instance}' deleted.").format(
model_name=self.model_name, instance=instance))
return redirect(self.get_index_url)
context = {'view': self, 'instance': self.instance}
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_template_names(self):
return self.model_admin.get_delete_template()
class UnpublishRedirectView(ObjectSpecificView):
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_unpublish_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_unpublish', self.object_id)
class CopyRedirectView(ObjectSpecificView):
def check_action_permitted(self):
user = self.request.user
return self.permission_helper.can_copy_object(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted():
return permission_denied(request)
self.prime_session_for_redirection()
return redirect('wagtailadmin_pages_copy', self.object_id)
| mit | 5,287,674,027,923,612,000 | 37.209618 | 79 | 0.595134 | false |
iamahuman/angr | angr/exploration_techniques/loop_seer.py | 1 | 6356 | import logging
from . import ExplorationTechnique
from ..knowledge_base import KnowledgeBase
from ..knowledge_plugins.functions import Function
l = logging.getLogger(name=__name__)
class LoopSeer(ExplorationTechnique):
"""
This exploration technique monitors exploration and maintains all
loop-related data (well, currently it is just the loop trip counts, but feel
free to add something else).
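    Typical usage (sketch; assumes ``project`` is a loaded angr Project and ``cfg`` a normalized CFG):
        simgr = project.factory.simulation_manager(state)
        simgr.use_technique(LoopSeer(cfg=cfg, bound=10))
        simgr.run()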
"""
def __init__(self, cfg=None, functions=None, loops=None, use_header=False, bound=None, bound_reached=None, discard_stash='spinning'):
"""
:param cfg: Normalized CFG is required.
:param functions: Function(s) containing the loop(s) to be analyzed.
:param loops: Loop(s) to be analyzed.
        :param use_header:    Whether to use the header-based trip counter when comparing against the bound limit.
        :param bound:         Limit the number of iterations a loop may execute.
        :param bound_reached: If provided, should be a function that takes a SimulationManager and returns
                               a SimulationManager. Will be called when loop execution reaches the given bound.
                               Defaults to moving states that exceed the loop limit to a discard stash.
:param discard_stash: Name of the stash containing states exceeding the loop limit.
"""
super(LoopSeer, self).__init__()
self.cfg = cfg
self.functions = functions
self.bound = bound
self.bound_reached = bound_reached
self.discard_stash = discard_stash
self.use_header = use_header
self.loops = {}
if type(loops) is Loop:
loops = [loops]
if type(loops) in (list, tuple) and all(type(l) is Loop for l in loops):
for loop in loops:
if loop.entry_edges:
self.loops[loop.entry_edges[0][0].addr] = loop
elif loops is not None:
raise TypeError("Invalid type for 'loops' parameter!")
def setup(self, simgr):
if self.cfg is None:
cfg_kb = KnowledgeBase(self.project)
self.cfg = self.project.analyses.CFGFast(kb=cfg_kb, normalize=True)
elif not self.cfg.normalized:
l.warning("LoopSeer must use a normalized CFG. Normalizing the provided CFG...")
self.cfg.normalize()
funcs = None
if type(self.functions) in (str, int, Function):
funcs = [self._get_function(self.functions)]
elif type(self.functions) in (list, tuple) and all(type(f) in (str, int, Function) for f in self.functions):
funcs = []
for f in self.functions:
func = self._get_function(f)
if func is not None:
funcs.append(func)
funcs = None if not funcs else funcs
elif self.functions is not None:
raise TypeError("Invalid type for 'functions' parameter!")
if not self.loops:
loop_finder = self.project.analyses.LoopFinder(kb=self.cfg.kb, normalize=True, functions=funcs)
for loop in loop_finder.loops:
if loop.entry_edges:
entry = loop.entry_edges[0][0]
self.loops[entry.addr] = loop
def step(self, simgr, stash='active', **kwargs):
for state in simgr.stashes[stash]:
# Processing a currently running loop
if state.loop_data.current_loop:
loop = state.loop_data.current_loop[-1][0]
header = loop.entry.addr
if state.addr == header:
continue_addrs = [e[0].addr for e in loop.continue_edges]
if state.history.addr in continue_addrs:
state.loop_data.back_edge_trip_counts[state.addr][-1] += 1
state.loop_data.header_trip_counts[state.addr][-1] += 1
elif state.addr in state.loop_data.current_loop[-1][1]:
state.loop_data.current_loop.pop()
if self.bound is not None:
counts = state.loop_data.back_edge_trip_counts[header][-1] if not self.use_header else \
state.loop_data.header_trip_counts[header][-1]
if counts > self.bound:
if self.bound_reached is not None:
simgr = self.bound_reached(simgr)
else:
simgr.stashes[stash].remove(state)
simgr.stashes[self.discard_stash].append(state)
l.debug("%s back edge based trip counts %s", state, state.loop_data.back_edge_trip_counts)
l.debug("%s header based trip counts %s", state, state.loop_data.header_trip_counts)
# Loop entry detected. This test is put here because in case of
# nested loops, we want to handle the outer loop before proceeding
# the inner loop.
if state.addr in self.loops:
loop = self.loops[state.addr]
header = loop.entry.addr
exits = [e[1].addr for e in loop.break_edges]
state.loop_data.back_edge_trip_counts[header].append(0)
state.loop_data.header_trip_counts[header].append(0)
state.loop_data.current_loop.append((loop, exits))
simgr.step(stash=stash, **kwargs)
return simgr
def successors(self, simgr, state, **kwargs):
node = self.cfg.get_any_node(state.addr)
if node is not None:
kwargs['num_inst'] = min(kwargs.get('num_inst', float('inf')), len(node.instruction_addrs))
return simgr.successors(state, **kwargs)
def _get_function(self, func):
f = None
if type(func) is str:
f = self.cfg.kb.functions.function(name=func)
if f is None:
l.warning("Function '%s' doesn't exist in the CFG. Skipping...", func)
elif type(func) is int:
f = self.cfg.kb.functions.function(addr=func)
if f is None:
l.warning("Function at 0x%x doesn't exist in the CFG. Skipping...", func)
elif type(func) is Function:
f = func
return f
from ..analyses.loopfinder import Loop
| bsd-2-clause | -423,667,648,311,579,200 | 41.373333 | 137 | 0.575519 | false |
danbordeanu/docker-proxy | unit_tests.py | 1 | 1864 | __author__ = 'danbordeanu'
import unittest
from config_parser import check_if_config_exists
import config_parser as parser
import random_generator as random_generator_function
class Md5Test(unittest.TestCase):
def test_is_there_config(self):
"""
Test if there is config file
:return:
"""
print 'test 1 - test if there is config file in place'
self.assertTrue(check_if_config_exists('config.ini'), 'Uhhh, no config file')
def test_random_port_generator_not_restricred(self):
"""
Test if random generated port is not in restricted list
:return:
"""
print 'test 2 - test if random generated port is not in the excluded list'
my_excluded_list = parser.config_params('rand_exclusion')['exclude_ports'].split()
my_port = random_generator_function.generator_instance.random_port()
for i in my_excluded_list:
self.assertNotEqual(my_port, i, 'This port should not be generated')
def test_random_port_generator_is_int(self):
"""
test if random port is generating an int
:return:
"""
print 'test 3 - test if generated radom port is int'
assert type(random_generator_function.generator_instance.random_port()) is int, \
'returned port is not integer {0}'.format(random_generator_function.generator_instance.random_port())
def test_random_volume_is_string(self):
"""
        test if random volume name is str
:return:
"""
print 'test 4 - test if generated random volume is str'
assert type(random_generator_function.generator_instance.random_volume()) is str, \
            'returned random volume is not str {0}'.format(random_generator_function.generator_instance.random_volume())
if __name__ == '__main__':
unittest.main() | gpl-3.0 | -6,510,117,579,252,129,000 | 37.061224 | 120 | 0.647532 | false |
alfa-addon/addon | plugin.video.alfa/channels/pedropolis.py | 1 | 17282 | # -*- coding: utf-8 -*-
# -*- Channel PedroPolis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    import urllib.parse as urlparse  # Very slow in PY2; native in PY3
else:
    import urlparse  # Use the PY2 native module, which is faster
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import channeltools
from core import tmdb
from platformcode import config, logger
from channelselector import get_thumb
__channel__ = "pedropolis"
host = "https://pedropolis.tv/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
__perfil__ = int(config.get_setting('perfil', __channel__))
except:
__modo_grafico__ = True
__perfil__ = 0
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
IDIOMAS = {'Latino': 'LAT'}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['rapidvideo', 'streamango', 'fastplay', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = [item.clone(title="Peliculas", action="menumovies", text_blod=True,
viewcontent='movies', viewmode="movie_with_plot", thumbnail=get_thumb("channels_movie.png")),
item.clone(title="Series", action="menuseries", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tvshows/', viewmode="movie_with_plot",
thumbnail=get_thumb("channels_tvshow.png")),
item.clone(title="Buscar", action="search", text_blod=True, extra='buscar',
thumbnail=get_thumb('search.png'), url=host)]
autoplay.show_option(item.channel, itemlist)
return itemlist
def menumovies(item):
logger.info()
itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/',
viewcontent='movies', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Mejor Valoradas", action="peliculas", text_blod=True,
viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
viewcontent='movies', url=host, viewmode="movie_with_plot"),
item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías",
viewcontent='movies', url=host, viewmode="movie_with_plot")]
return itemlist
def menuseries(item):
logger.info()
itemlist = [item.clone(title="Todas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'serie/', viewmode="movie_with_plot"),
item.clone(title="Más Vistas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'tendencias/?get=tv', viewmode="movie_with_plot"),
item.clone(title="Mejor Valoradas", action="series", text_blod=True, extra='serie', mediatype="tvshow",
viewcontent='tvshows', url=host + 'calificaciones/?get=tv', viewmode="movie_with_plot")]
return itemlist
def p_portipo(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
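    # item.extra holds the section heading ("Películas Por año" or "Categorías"); keep only the <ul> block that follows it.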
bloque = scrapertools.find_single_match(data, '(?is)%s.*?</ul>' %item.extra)
patron = 'href="([^"]+).*?'
patron += '>([^"<]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action = "peliculas",
title = scrapedtitle,
url = scrapedurl
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster">[^<]+<img src="([^"]+)" alt="([^"]+)">[^<]+' # img, title
patron += '<div class="rating"><span class="[^"]+"></span>([^<]+)' # rating
    patron += '.*?<span class="quality">([^<]+)</span> </div>.*?<a href="([^"]+)">.*?'  # quality, url
patron += '<span>([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
'Español Latino', '').strip()
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
infoLabels={"year":year, "rating":rating}, thumbnail=scrapedthumbnail,
url=scrapedurl, quality=quality, title=title))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = urlparse.urljoin(item.url, "?s={0}".format(texto))
try:
return sub_search(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
def sub_search(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'Resultados encontrados.*?class="widget widget_fbw_id')
patron = '(?is)<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += 'alt="([^"]+)" />.*?' # url, img, title
    patron += '<span class="[^"]+">([^<]+)</span>.*?'  # type
patron += '<span class="year">([^"]+)' # year
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
title = scrapedtitle
if tipo == ' Serie ':
contentType = 'tvshow'
action = 'temporadas'
title += ' [COLOR red](' + tipo + ')[/COLOR]'
else:
contentType = 'movie'
action = 'findvideos'
title += ' [COLOR green](' + tipo + ')[/COLOR]'
itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
action=action, infoLabels={"year": year}, contentType=contentType,
thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
itemlist.append(Item(channel=item.channel, action="sub_search",
title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
return itemlist
def newest(categoria):
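    # Called by Alfa's global "newest" listings; maps each category to the matching section URL on the site.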
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host + 'movies/'
elif categoria == 'infantiles':
item.url = host + "genre/animacion/"
elif categoria == 'terror':
item.url = host + "genre/terror/"
else:
return []
itemlist = peliculas(item)
        if itemlist[-1].title == "» Siguiente »":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="poster"> <img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?'
patron += '<a href="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapedtitle.replace('’', "'")
itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
url=scrapedurl, thumbnail=scrapedthumbnail,
contentSerieName=scrapedtitle, show=scrapedtitle,
action="temporadas", contentType='tvshow'))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
if pagination:
        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
thumbnail=get_thumb("next.png")))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<span class="title">([^<]+)<i>.*?' # season
patron += '<img src="([^"]+)"></a></div>' # img
matches = scrapertools.find_multiple_matches(data, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='serie')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
# Si la temporada tiene nombre propio añadírselo al titulo del item
i.title += " - %s" % (i.infoLabels['title'])
if 'poster_path' in i.infoLabels:
# Si la temporada tiene poster propio remplazar al de la serie
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
else:
return episodios(item)
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="imagen"><a href="([^"]+)">.*?' # url
patron += '<div class="numerando">(.*?)</div>.*?' # numerando cap
patron += '<a href="[^"]+">([^<]+)</a>' # title de episodios
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
season, episode = match[0]
if 'season' in item.infoLabels and int(item.infoLabels['season']) != int(season):
continue
title = "%sx%s: %s" % (season, episode.zfill(2), scrapertools.unescape(scrapedname))
new_item = item.clone(title=title, url=scrapedurl, action="findvideos", text_color=color3, contentTitle=title,
contentType="episode", extra='serie')
if 'infoLabels' not in new_item:
new_item.infoLabels = {}
new_item.infoLabels['season'] = season
new_item.infoLabels['episode'] = episode.zfill(2)
itemlist.append(new_item)
# TODO no hacer esto si estamos añadiendo a la videoteca
if not item.extra:
# Obtenemos los datos de todos los capítulos de la temporada mediante multihilos
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadírselo al titulo del item
i.title = "%sx%s: %s" % (i.infoLabels['season'], i.infoLabels['episode'], i.infoLabels['title'])
if 'poster_path' in i.infoLabels:
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
itemlist.sort(key=lambda it: int(it.infoLabels['episode']),
reverse=config.get_setting('orden_episodios', __channel__))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# Opción "Añadir esta serie a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=get_thumb("videolibrary_tvshow.png"), fanart=fanart_host))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>' # lang, url
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>(.*?)<span' % option)
lang = lang.lower().strip()
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
'drive': '[COLOR cornflowerblue](LAT)[/COLOR]',
'castellano': '[COLOR green](CAST)[/COLOR]',
'español': '[COLOR green](CAST)[/COLOR]',
'subtitulado': '[COLOR red](VOS)[/COLOR]',
'ingles': '[COLOR red](VOS)[/COLOR]'}
if lang in idioma:
lang = idioma[lang]
# obtenemos los redirecionamiento de shorturl en caso de coincidencia
if "bit.ly" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone(channel=__channel__, url=url, title=item.contentTitle,
action='play', language=lang))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.sort(key=lambda it: it.language, reverse=False)
for x in itemlist:
if x.extra != 'directo':
x.thumbnail = item.thumbnail
x.title = "Ver en: [COLOR yellow](%s)[/COLOR] %s" % (x.server.title(), x.language)
if item.extra != 'serie' and item.extra != 'buscar':
x.title = "Ver en: [COLOR yellowgreen](%s)[/COLOR] [COLOR yellow](%s)[/COLOR] %s" % (
x.server.title(), x.quality, x.language)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'serie':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=get_thumb("videolibrary_movie.png"), contentTitle=item.contentTitle))
return itemlist
| gpl-3.0 | 5,684,872,392,204,007,000 | 44.755968 | 123 | 0.589159 | false |
Timidger/Mao | server.py | 1 | 1240 | import time
import threading
import atexit
from src.Server.Server import Server
from src.Server.PlayerHandler import PlayerHandler
from src.Server.RuleHandler import RuleHandler
from src.Base.Rule import Rule
from src.Base.OptionsParser import load_configuration
start_time = time.time()
def test_rule(server_object):
global a
a = exit
config_parser = load_configuration("server")
rules = [Rule('Test Rule', None, test_rule)]
RH = RuleHandler(rules)
PH = PlayerHandler([])
ip = config_parser.get('Misc.', 'ip')
port = config_parser.getint('Misc.', 'port')
players = PH.players
server = Server(RH, PH, config_parser, port, ip)
clients = server.clients
deck = server.deck
pile = server.pile
print 'Server at {}:{}'.format(ip or 'localhost', port)
MAIN_THREAD = threading.Thread(target = server.main_loop)
print "Type 'MAIN_THREAD.start()' to start the game!"
def uptime():
print "The server has been running for {} seconds".format(
int(time.time() - start_time))
def kick(player_name):
for player in players:
if player.name == player_name:
server.disconnect(server.get_client(player))
else:
raise KeyError, 'No player named "{}"!'.format(player_name)
atexit.register(server.shutdown)
| mit | 3,313,964,386,087,644,000 | 28.52381 | 67 | 0.714516 | false |
jaumemarti/l10n-spain-txerpa | l10n_es_bank_statement/models/c43_account_statement_profile.py | 1 | 1611 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) All rights reserved:
# 2013-2014 Servicios Tecnológicos Avanzados (http://serviciosbaeza.com)
# Pedro Manuel Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import orm
class AccountStatementProfil(orm.Model):
_inherit = "account.statement.profile"
def _get_import_type_selection(self, cr, uid, context=None):
"""Inherited from parent to add parser."""
selection = super(AccountStatementProfil, self
)._get_import_type_selection(cr, uid,
context=context)
selection.append(('aeb_c43', _('AEB C43 standard')))
return selection
| agpl-3.0 | -7,381,634,109,124,706,000 | 45 | 79 | 0.601242 | false |
BCCVL/org.bccvl.testsetup | src/org/bccvl/testsetup/main.py | 1 | 9331 | """ call this with:
./bin/instance run src/org.bccvl.testsetup/src/org/bccvl/testsetup/main.py ....
make sure ./bin/instance is down while doing this
"""
import sys
import logging
from AccessControl.SecurityManagement import newSecurityManager
from AccessControl.SpecialUsers import system
from Testing.makerequest import makerequest
import transaction
from AccessControl.SecurityManager import setSecurityPolicy
from Products.CMFCore.tests.base.security import PermissiveSecurityPolicy, OmnipotentUser
from collective.transmogrifier.transmogrifier import Transmogrifier
import argparse
# TODO: if item/file id already exists, then just updload/update metadata
try:
from zope.component.hooks import site
except ImportError:
# we have an older zope.compenents:
import contextlib
from zope.component.hooks import getSite, setSite
@contextlib.contextmanager
def site(site):
old_site = getSite()
setSite(site)
try:
yield
finally:
setSite(old_site)
LOG = logging.getLogger('org.bccvl.testsetup')
def import_data(site, params):
source_options = {}
if params.get('dev', 'False'):
source_options = {
'devsource': {
'enabled': "True"}
}
elif params.get('test', 'False'):
# run test imports only
source_options = {
'a5ksource': {
'emsc': 'RCP3PD',
'gcm': 'cccma-cgcm31',
'year': '2015, 2025',
'enabled': "True"
},
'nsgsource': {
'enabled': 'True',
},
'vastsource': {
'enabled': 'True',
},
}
elif params.get('all', 'False'):
# import all known datasources:
source_options = {
'a5ksource': {'enabled': "True"},
'a1ksource': {'enabled': "True"},
'a250source': {'enabled': "True"},
'nsgsource': {'enabled': "True"},
'vastsource': {'enabled': "True"},
'mrrtfsource': {'enabled': "True"},
'mrvbfsource': {'enabled': "True"},
# TODO: maybe re-design this one to handle years differently
'awapsource': {'enabled': "False"},
'petsource': {'enabled': "False"},
'ndlcsource': {'enabled': "False"},
'wccsource': {'enabled': "False"},
'wcfsource': {'enabled': "False"},
'gppsource': {'enabled': "False"},
'fparsource': {'enabled': "False"},
'cruclimsource': {'enabled': "False"},
'accuclimsource': {'enabled': "False"},
'tasclimsource': {'enabled': "False"},
'climondsource': {'enabled': "False"},
'narclimsource': {'enabled': "False"},
'anuclimsource': {'enabled': "False"},
'geofabricsource': {'enabled': "False"},
'nvissource': {'enabled': "False"},
'austsubsfertsource': {'enabled': "False"},
'currentglobalmarinesource': {'enabled': "False"},
'futureglobalmarinesource': {'enabled': "False"},
'marspecmarinesource': {'enabled': "False"}
}
else:
for fcsource in ('a5ksource', 'a1ksource', 'a250source', 'awapsource', 'wcfsource'):
if params.get(fcsource, False):
source_options[fcsource] = {'enabled': 'True'}
for p in ['emsc', 'gcm', 'year']:
if params.get(p, None):
source_options[fcsource][p] = \
params.get(p, '')
for source in ['nsgsource', 'vastsource',
'mrrtfsource', 'mrvbfsource',
'petsource', 'ndlcsource',
'wccsource', 'gppsource',
'fparsource', 'cruclimsource',
'accuclimsource', 'tasclimsource',
'climondsource', 'narclimsource',
'nvissource', 'anuclimsource',
'austsubsfertsource', 'currentglobalmarinesource',
'futureglobalmarinesource', 'marspecmarinesource']:
if params.get(source, False):
source_options[source] = {'enabled': 'True'}
for source in ['geofabricsource']:
if params.get(source, False):
source_options[source] = {'enabled': 'True'}
for p in ['btype', 'dstype']:
if params.get(p, None):
source_options[source][p] = params.get(p, '')
source_options['updatemetadata'] = {
'siteurl': params.get('siteurl', ''),
'sync': str(params.get('sync'))
}
if params.get('sync'):
# in case we do in process metadata update we can commit
# after every item
source_options['commit'] ={
'every': '1'
}
transmogrifier = Transmogrifier(site)
transmogrifier(u'org.bccvl.testsetup.dataimport',
**source_options)
transaction.commit()
def spoofRequest(app):
"""
Make REQUEST variable to be available on the Zope application server.
This allows acquisition to work properly
"""
_policy = PermissiveSecurityPolicy()
_oldpolicy = setSecurityPolicy(_policy)
newSecurityManager(None, OmnipotentUser().__of__(app.acl_users))
return makerequest(app)
def main(app, params):
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root_logger.addHandler(handler)
logging.getLogger('ZODB.Connection').setLevel(logging.WARN)
# logging.getLogger('').setLevel(logging.WARN)
# logging.getLogger('').setLevel(logging.WARN)
app = spoofRequest(app)
newSecurityManager(None, system)
# TODO: works only if site id is bccvl
portal = app.unrestrictedTraverse('bccvl')
# we didn't traverse, so we have to set the proper site
with site(portal):
import_data(portal, params)
def parse_args(args):
parser = argparse.ArgumentParser(description='Import datasets.')
parser.add_argument('--siteurl')
parser.add_argument('--sync', action='store_true')
parser.add_argument('--dev', action='store_true')
parser.add_argument('--test', action='store_true')
parser.add_argument('--all', action='store_true')
parser.add_argument('--a5ksource', action='store_true')
parser.add_argument('--a1ksource', action='store_true')
parser.add_argument('--a250source', action='store_true')
parser.add_argument('--gcm')
parser.add_argument('--emsc')
parser.add_argument('--year')
parser.add_argument('--austsubsfertsource', action='store_true')
parser.add_argument('--nsgsource', action='store_true')
parser.add_argument('--vastsource', action='store_true')
parser.add_argument('--mrrtfsource', action='store_true')
parser.add_argument('--mrvbfsource', action='store_true')
parser.add_argument('--awapsource', action='store_true')
parser.add_argument('--petsource', action='store_true')
parser.add_argument('--ndlcsource', action='store_true')
parser.add_argument('--wccsource', action='store_true')
parser.add_argument('--wcfsource', action='store_true')
parser.add_argument('--gppsource', action='store_true')
parser.add_argument('--fparsource', action='store_true')
parser.add_argument('--cruclimsource', action='store_true')
parser.add_argument('--accuclimsource', action='store_true')
parser.add_argument('--tasclimsource', action='store_true')
parser.add_argument('--climondsource', action='store_true')
parser.add_argument('--narclimsource', action='store_true')
parser.add_argument('--geofabricsource', action='store_true')
parser.add_argument('--nvissource', action='store_true')
parser.add_argument('--anuclimsource', action='store_true')
parser.add_argument('--currentglobalmarinesource', action='store_true')
parser.add_argument('--futureglobalmarinesource', action='store_true')
parser.add_argument('--marspecmarinesource', action='store_true')
# Additional parameters for Geofabric datasets
parser.add_argument('--btype', type=str, choices=['catchment', 'stream'], help='Geofabric boundary type')
parser.add_argument('--dstype', type=str, help='Geofabric dataset type i.e. climate, vegetation')
pargs = parser.parse_args(args)
return vars(pargs)
def zopectl(app, args):
""" zopectl entry point
app ... the Zope root Application
args ... list of command line args passed (very similar to sys.argv)
args[0] ... name of script but is always '-c'
args[1] ... name of entry point
args[2:] ... all additional commandline args
"""
# get rid of '-c'
if args[0] == '-c':
args.pop(0)
# now args looks pretty much like sys.argv
params = parse_args(args[1:])
# ok let's do some import'
main(app, params)
if 'app' in locals():
# we have been started via ./bin/instance run main.py
# but ideally should be run via ./bin/instance testsetup
main(app, parse_args(sys.argv[4:]))
| gpl-2.0 | -6,745,505,773,619,150,000 | 39.04721 | 109 | 0.598864 | false |
emidln/django_roa | django_roa/db/exceptions.py | 1 | 2114 | from django.conf import settings
from django.utils.html import strip_tags
from django.utils.text import unescape_entities
from django.utils.encoding import force_unicode
ROA_DJANGO_ERRORS = getattr(settings, 'ROA_DJANGO_ERRORS', False)
class ROAException(Exception):
def __init__(self, exception):
if ROA_DJANGO_ERRORS and 'message' in exception \
and 'status_code' in exception:
self.msg = force_unicode(exception.message)
self.status_code = exception.status_code
else:
self.msg = force_unicode(exception)
self.status_code = 'XXX'
def __str__(self):
if ROA_DJANGO_ERRORS and '<body>' in self.msg:
return self.parse_django_error()
return self.msg
def parse_django_error(self):
"""Extract the summary part of a Django HTML error."""
try:
summary = self.msg.split(u'<body>\n<div id="summary">\n ', 1)[1]\
.split(u'<th>Python Executable:</th>', 1)[0]
traceback = self.msg.split(u'\n\nTraceback:', 1)[1]\
.split(u'</textarea>', 1)[0]
except IndexError:
return self.msg
result = []
title = None
for line in strip_tags(summary).split('\n'):
line_content = unescape_entities(line.strip())
if line_content:
if line_content.endswith(':'):
title = line_content
elif title is None:
title = "%s:" % line_content
else:
result.append("%s %s\n" % (title, line_content))
result.append("Status code: %s" % self.status_code)
indent, indent2 = u' ', u' '
return u"%(summary)s %(traceback)s".strip() % {
'summary': indent.join(force_unicode(line) for line in result),
'traceback': indent2.join(force_unicode(line+"\n") \
for line in traceback.split('\n')),
}
class ROANotImplementedYetException(Exception):
pass
| bsd-3-clause | 5,526,580,686,477,947,000 | 38.886792 | 78 | 0.544465 | false |
hunter-87/binocular-dense-stereo | StereoVision-master/stereovision/ui_utils.py | 1 | 9561 | # Copyright (C) 2014 Daniel Lee <[email protected]>
#
# This file is part of StereoVision.
#
# StereoVision is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# StereoVision is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StereoVision. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities for easing user interaction with the ``stereovision`` package.
Variables:
* ``CHESSBOARD_ARGUMENTS`` - ``argparse.ArgumentParser`` for working with
chessboards
* ``STEREO_BM_FLAG`` - ``argparse.ArgumentParser`` for using StereoBM
Functions:
* ``find_files`` - Discover stereo images in directory
    * ``calibrate_folder`` - Calibrate chessboard images discovered in a folder
Classes:
* ``BMTuner`` - Tune block matching algorithm to camera pair
.. image:: classes_ui_utils.svg
"""
from argparse import ArgumentParser
from functools import partial
import os
import cv2
from progressbar import ProgressBar, Percentage, Bar
from stereovision.calibration import StereoCalibrator
from stereovision.exceptions import BadBlockMatcherArgumentError
#: Command line arguments for collecting information about chessboards
CHESSBOARD_ARGUMENTS = ArgumentParser(add_help=False)
CHESSBOARD_ARGUMENTS.add_argument("--rows", type=int,
help="Number of inside corners in the "
"chessboard's rows.", default=9)
CHESSBOARD_ARGUMENTS.add_argument("--columns", type=int,
help="Number of inside corners in the "
"chessboard's columns.", default=6)
CHESSBOARD_ARGUMENTS.add_argument("--square-size", help="Size of chessboard "
"squares in cm.", type=float, default=1.8)
#: Command line arguments for using StereoBM rather than StereoSGBM
STEREO_BM_FLAG = ArgumentParser(add_help=False)
STEREO_BM_FLAG.add_argument("--use_stereobm", help="Use StereoBM rather than "
"StereoSGBM block matcher.", action="store_true")
def find_files(folder):
"""Discover stereo photos and return them as a pairwise sorted list."""
files = [i for i in os.listdir(folder) if i.startswith("left")]
files.sort()
for i in range(len(files)):
insert_string = "right{}".format(files[i * 2][4:])
files.insert(i * 2 + 1, insert_string)
files = [os.path.join(folder, filename) for filename in files]
return files
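# Illustrative behaviour of find_files() (hedged example; actual file names
# depend on how the captures were saved): a folder holding
#   left_0.ppm, left_1.ppm, right_0.ppm, right_1.ppm
# yields the pairwise interleaved list
#   [<folder>/left_0.ppm, <folder>/right_0.ppm, <folder>/left_1.ppm, <folder>/right_1.ppm]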
def calibrate_folder(args):
"""
Calibrate camera based on chessboard images, write results to output folder.
All images are read from disk. Chessboard points are found and used to
calibrate the stereo pair. Finally, the calibration is written to the folder
specified in ``args``.
``args`` needs to contain the following fields:
input_files: List of paths to input files
rows: Number of rows in chessboard
columns: Number of columns in chessboard
square_size: Size of chessboard squares in cm
output_folder: Folder to write calibration to
"""
height, width = cv2.imread(args.input_files[0]).shape[:2]
calibrator = StereoCalibrator(args.rows, args.columns, args.square_size,
(width, height))
progress = ProgressBar(maxval=len(args.input_files),
widgets=[Bar("=", "[", "]"),
" ", Percentage()])
print("Reading input files...")
while args.input_files:
left, right = args.input_files[:2]
img_left, im_right = cv2.imread(left), cv2.imread(right)
calibrator.add_corners((img_left, im_right),
show_results=args.show_chessboards)
args.input_files = args.input_files[2:]
progress.update(progress.maxval - len(args.input_files))
progress.finish()
print("Calibrating cameras. This can take a while.")
calibration = calibrator.calibrate_cameras()
avg_error = calibrator.check_calibration(calibration)
print("The average error between chessboard points and their epipolar "
"lines is \n"
"{} pixels. This should be as small as possible.".format(avg_error))
calibration.export(args.output_folder)
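# Hedged usage sketch for calibrate_folder(): any object exposing the fields
# listed in the docstring (plus show_chessboards, which the body also reads)
# works, so a hand-built argparse.Namespace is enough for illustration.
# Folder names below are hypothetical.
#   from argparse import Namespace
#   args = Namespace(input_files=find_files('captures'),
#                    rows=9, columns=6, square_size=1.8,
#                    show_chessboards=False, output_folder='calibration_out')
#   calibrate_folder(args)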
class BMTuner(object):
"""
A class for tuning Stereo BM settings.
Display a normalized disparity picture from two pictures captured with a
``CalibratedPair`` and allow the user to manually tune the settings for the
``BlockMatcher``.
The settable parameters are intelligently read from the ``BlockMatcher``,
relying on the ``BlockMatcher`` exposing them as ``parameter_maxima``.
"""
#: Window to show results in
window_name = "BM Tuner"
def _set_value(self, parameter, new_value):
"""Try setting new parameter on ``block_matcher`` and update map."""
try:
self.block_matcher.__setattr__(parameter, new_value)
except BadBlockMatcherArgumentError:
return
self.update_disparity_map()
def _initialize_trackbars(self):
"""
Initialize trackbars by discovering ``block_matcher``'s parameters.
"""
for parameter in self.block_matcher.parameter_maxima.keys():
maximum = self.block_matcher.parameter_maxima[parameter]
if not maximum:
maximum = self.shortest_dimension
cv2.createTrackbar(parameter, self.window_name,
self.block_matcher.__getattribute__(parameter),
maximum,
partial(self._set_value, parameter))
def _save_bm_state(self):
"""Save current state of ``block_matcher``."""
for parameter in self.block_matcher.parameter_maxima.keys():
self.bm_settings[parameter].append(
self.block_matcher.__getattribute__(parameter))
def __init__(self, block_matcher, calibration, image_pair):
"""
Initialize tuner window and tune given pair.
``block_matcher`` is a ``BlockMatcher``, ``calibration`` is a
``StereoCalibration`` and ``image_pair`` is a rectified image pair.
"""
#: Stereo calibration to find Stereo BM settings for
self.calibration = calibration
#: (left, right) image pair to find disparity between
self.pair = image_pair
#: Block matcher to be tuned
self.block_matcher = block_matcher
#: Shortest dimension of image
self.shortest_dimension = min(self.pair[0].shape[:2])
#: Settings chosen for ``BlockMatcher``
self.bm_settings = {}
for parameter in self.block_matcher.parameter_maxima.keys():
self.bm_settings[parameter] = []
cv2.namedWindow(self.window_name)
self._initialize_trackbars()
self.tune_pair(image_pair)
def update_disparity_map(self):
"""
Update disparity map in GUI.
The disparity image is normalized to the range 0-255 and then divided by
255, because OpenCV multiplies it by 255 when displaying. This is
because the pixels are stored as floating points.
"""
disparity = self.block_matcher.get_disparity(self.pair)
norm_coeff = 255 / disparity.max()
cv2.imshow(self.window_name, disparity * norm_coeff / 255)
cv2.waitKey()
def tune_pair(self, pair):
"""Tune a pair of images."""
self._save_bm_state()
self.pair = pair
self.update_disparity_map()
def report_settings(self, parameter):
"""
Report chosen settings for ``parameter`` in ``block_matcher``.
``bm_settings`` is updated to include the latest state before work is
begun. This state is removed at the end so that the method has no side
effects. All settings are reported except for the first one on record,
which is ``block_matcher``'s default setting.
"""
self._save_bm_state()
report = []
settings_list = self.bm_settings[parameter][1:]
unique_values = list(set(settings_list))
value_frequency = {}
for value in unique_values:
value_frequency[settings_list.count(value)] = value
frequencies = value_frequency.keys()
frequencies.sort(reverse=True)
header = "{} value | Selection frequency".format(parameter)
left_column_width = len(header[:-21])
right_column_width = 21
report.append(header)
report.append("{}|{}".format("-" * left_column_width,
"-" * right_column_width))
for frequency in frequencies:
left_column = str(value_frequency[frequency]).center(
left_column_width)
right_column = str(frequency).center(right_column_width)
report.append("{}|{}".format(left_column, right_column))
# Remove newest settings
for param in self.block_matcher.parameter_maxima.keys():
self.bm_settings[param].pop(-1)
return "\n".join(report)
| gpl-2.0 | 6,180,144,040,576,407,000 | 39.858974 | 80 | 0.633616 | false |
Affirm/cabot | cabot/cabotapp/monitor.py | 1 | 1567 | from celery.signals import task_success, task_failure
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
try:
from boto.ec2 import cloudwatch
if not settings.AWS_CLOUDWATCH_SYNC:
CONNECTION = None
else:
region = settings.AWS_CLOUDWATCH_REGION
access_key = settings.AWS_CLOUDWATCH_ACCESS_KEY
secret_key = settings.AWS_CLOUDWATCH_SECRET_KEY
NAMESPACE = settings.AWS_CLOUDWATCH_NAMESPACE
PREFIX = settings.AWS_CLOUDWATCH_PREFIX
CONNECTION = cloudwatch.connect_to_region(
region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
except ImportError:
NAMESPACE = None
PREFIX = None
CONNECTION = None
def _notify_cloudwatch(task_name, state):
'''
Update cloudwatch with a metric alert about a task
'''
if CONNECTION:
if PREFIX:
metric = '%s.%s.%s' % (PREFIX, task_name, state)
else:
metric = '%s.%s' % (task_name, state)
try:
CONNECTION.put_metric_data(NAMESPACE, metric, 1)
except:
logger.exception('Error sending cloudwatch metric')
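# Illustrative metric names produced above (the task name is hypothetical):
#   PREFIX='cabot', task 'run_all_checks' succeeding -> 'cabot.run_all_checks.success'
#   no PREFIX, the same task failing                 -> 'run_all_checks.failure'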
@task_success.connect
def notify_success(sender=None, *args, **kwargs):
'''
Update cloudwatch about a task success
'''
_notify_cloudwatch(sender.name, 'success')
@task_failure.connect
def notify_failure(sender=None, *args, **kwargs):
'''
Update cloudwatch about a task failure
'''
_notify_cloudwatch(sender.name, 'failure')
| mit | -6,029,229,335,493,826,000 | 25.116667 | 63 | 0.635609 | false |
ye-zhi/project-epsilon | code/utils/tests/test_t-test.py | 1 | 1038 | """test_t-test.py
Tests for the functions in the t-test.py
Run with:
nosetests test_t-test.py
"""
from __future__ import print_function
import os, sys
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
#Append path to functions
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
from t_test import t_stat
def test_t_stat():
psychopathy = [11.416, 4.514, 12.204, 14.835,
8.416, 6.563, 17.343, 13.02,
15.19 , 11.902, 22.721, 22.324]
clammy = [0.389, 0.2 , 0.241, 0.463,
4.585, 1.097, 1.642, 4.972,
7.957, 5.585, 5.527, 6.964]
age = [22.5, 25.3, 24.6, 21.4,
20.7, 23.3, 23.8, 21.7,
21.3, 25.2, 24.6, 21.8]
X = np.column_stack((np.ones(12), clammy))
Y = np.asarray(psychopathy)
B, t, df, p = t_stat(Y, X)
assert_array_equal((np.around(t[1][:6],6),np.around(p[1][:6],6)),
( [1.914389], [0.042295]))
| bsd-3-clause | -6,699,563,336,864,459 | 32.483871 | 76 | 0.533719 | false |
kylebegovich/ProjectEuler | Python/Solved/Page2/Problem60.py | 1 | 2031 | from Euler import prime_sieve, is_prime
result = int(100000000)
primes = prime_sieve(30000)
pairs = [None] * len(primes)
def concat(first, second):
return int(str(first) + str(second))
def make_pairs(list_of_primes):
pairs = list()
if list_of_primes is None:
return
for elem in list_of_primes:
for other_elem in list_of_primes:
if elem is other_elem:
continue
pairs.append(concat(elem, other_elem))
return pairs
def main():
answers = list()
for index_a in range(0, len(primes)):
if primes[index_a] * 5 >= result: break
        if pairs[index_a] is None: pairs[index_a] = make_pairs([primes[index_a]])
for index_b in range(index_a, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_b]]))]):
answers.append(primes[index_b])
for index_c in range(index_b, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_c]]))]):
answers.append(primes[index_c])
for index_d in range(index_c, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_d]]))]):
answers.append(primes[index_d])
for index_e in range(index_d, len(primes)):
if all([is_prime(n) for n in iter(make_pairs(answers + [primes[index_e]]))]):
answers.append(primes[index_e])
return answers
return "Failed", answers
def test_concat():
print(concat(5, 5))
print(concat(1512, 4))
print(concat(9, 0))
def test_make_pairs():
print(make_pairs([1, 3, 5]))
print(make_pairs([7, 9]))
print(make_pairs([75, 23, 18]))
test = make_pairs([3, 7, 109, 673])
for elem in iter(test):
print(elem, is_prime(elem))
print(main())
# SOLVED : 26033
| gpl-3.0 | 8,671,600,554,278,434,000 | 27.605634 | 113 | 0.529296 | false |
Pavel-Durov/pynetwork | pynetwork/mail.py | 1 | 6010 | """Script for generating mail content and sending emails to gmail accounts"""
import smtplib
import chart
import time
import fsutil
import timeutil
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from jinja2 import Environment
from email.mime.image import MIMEImage
class EmailSender(object):
"""Responsible for emails sending"""
SUBJECT_EMAIL = "Here is your network check update."
GMAIL_SMTP = 'smtp.gmail.com:587'
def send_gmail(self, message_content, chart_image_path):
"""Sends gmail to specified account"""
receiver = self.__config.get_receiver_gmail_account
logging.getLogger("PYNETWORK").info("sending email to: " + receiver)
server = smtplib.SMTP(self.GMAIL_SMTP)
server.ehlo()
server.starttls()
# Record the MIME types of both parts - text/plain and text/html.
sender = self.__config.get_agent_gmail_account
msg = MIMEMultipart('alternative')
msg['Subject'] = self.SUBJECT_EMAIL
msg['From'] = sender
msg['To'] = receiver
filename = chart.get_daily_chart_path(self.__config, timeutil.utc_now())
if self.__config.get_attach_mail_chart and fsutil.file_exist(filename):
self.__attach_chart(filename, msg)
if fsutil.file_exist(chart_image_path):
fp = open(chart_image_path, 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
# Define the image's ID as referenced in html
msgImage.add_header('Content-ID', '<networkGraphImage>')
msg.attach(msgImage)
# Attach parts into message container.
msg.attach(MIMEText(message_content, 'html'))
if server.login(sender, self.__config.get_agent_gmail_password):
server.sendmail(sender, receiver, msg.as_string())
server.quit()
else:
logging.getLogger("PYNETWORK").error("could not login :(")
def __attach_chart(self, filename, msg):
attachment = open(filename, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)
def __init__(self, config):
self.__config = config
class MessageFormatter(object):
"""Formats email content"""
OK_CSS_CLASS = "ok"
NOT_OK_CSS_CLASS = "not-ok"
def __init__(self, config):
self.__config = config
self.MAIL_TEMPLATE_PATH = config.PROJ_PATH + "/templates/html_templates/mail_template.html"
self.__env = Environment(line_statement_prefix='%',
variable_start_string="${",
variable_end_string="}")
def format_message(self, result):
"""Formats message as html"""
html = self.__create_html(result)
if self.__config.get_write_html_file:
fsutil.write_to_file(self.__config.OUTPUT_HTML_FILE, html)
return html
def __speed_check_title_html(self, result):
if self.__config.get_downlad_constraint != self.__config.UNSET_CONSTRAINT:
download_check = result.get_download_speed < self.__config.get_downlad_constraint
else:
download_check = False
if self.__config.get_upload_constraint != self.__config.UNSET_CONSTRAINT:
upload_check = result.get_upload_speed < self.__config.get_upload_constraint
else:
upload_check = False
if self.__config.get_ping_constraint != self.__config.UNSET_CONSTRAINT:
ping_check = result.get_ping_speed < self.__config.get_ping_constraint
else:
ping_check = False
title = 'Network'
ok_status = False
if download_check or upload_check or ping_check:
multiple = False
if download_check:
title = title + " download"
multiple = True
if upload_check:
if multiple:
title = title + ", "
title = title + " upload"
multiple = True
if ping_check:
if multiple:
title = title + ", "
title = title + " ping"
multiple = True
if multiple:
title = title + " issues"
else:
title = title + " issue"
else:
title = title + ' speed is OK'
ok_status = True
return {'content': title, 'status': ok_status}
def __create_html(self, result):
title = self.__speed_check_title_html(result)
#public_ip_addr = get('https://api.ipify.org').text
bcss_class = self.OK_CSS_CLASS if title["status"] else self.NOT_OK_CSS_CLASS
html_template = fsutil.get_file_content(self.MAIL_TEMPLATE_PATH)
tmpl = self.__env.from_string(html_template)
chart_img_src = chart.get_daily_chart_image_path(self.__config, result.get_time_stamp)
return tmpl.render(css=fsutil.get_file_content(self.__config.MAIN_CSS_PATH),
title=title["content"],
body_css_class=bcss_class,
ping_speed=str(result.get_ping_speed),
upload_speed=str(result.get_upload_speed),
download_speed=str(result.get_download_speed),
upload_constraint=str(self.__config.get_upload_constraint),
download_constraint=str(self.__config.get_downlad_constraint),
ping_constraint=str(self.__config.get_ping_constraint),
time_stamp=timeutil.format_to_time_str(result.get_time_stamp),
img_src=chart_img_src)
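# Hedged wiring sketch (a config object and a speed-check result shaped like the
# ones used above are assumed; the chart path is illustrative):
#   formatter = MessageFormatter(config)
#   html = formatter.format_message(result)
#   EmailSender(config).send_gmail(html, chart_image_path)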
| mit | 7,932,398,313,926,550,000 | 34.988024 | 99 | 0.582196 | false |
ericflo/django-tokyo-sessions | tokyo_sessions/tests.py | 1 | 1325 | r"""
>>> from django.conf import settings
>>> from tokyo_sessions.tyrant import SessionStore as TokyoSession
>>> tokyo_session = TokyoSession()
>>> tokyo_session.modified
False
>>> tokyo_session.get('cat')
>>> tokyo_session['cat'] = "dog"
>>> tokyo_session.modified
True
>>> tokyo_session.pop('cat')
'dog'
>>> tokyo_session.pop('some key', 'does not exist')
'does not exist'
>>> tokyo_session.save()
>>> tokyo_session.exists(tokyo_session.session_key)
True
>>> tokyo_session.delete(tokyo_session.session_key)
>>> tokyo_session.exists(tokyo_session.session_key)
False
>>> tokyo_session['foo'] = 'bar'
>>> tokyo_session.save()
>>> tokyo_session.exists(tokyo_session.session_key)
True
>>> prev_key = tokyo_session.session_key
>>> tokyo_session.flush()
>>> tokyo_session.exists(prev_key)
False
>>> tokyo_session.session_key == prev_key
False
>>> tokyo_session.modified, tokyo_session.accessed
(True, True)
>>> tokyo_session['a'], tokyo_session['b'] = 'c', 'd'
>>> tokyo_session.save()
>>> prev_key = tokyo_session.session_key
>>> prev_data = tokyo_session.items()
>>> tokyo_session.cycle_key()
>>> tokyo_session.session_key == prev_key
False
>>> tokyo_session.items() == prev_data
True
"""
if __name__ == '__main__':
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import doctest
doctest.testmod()
| bsd-3-clause | 6,392,894,074,245,100,000 | 25.5 | 66 | 0.682264 | false |
yaricom/brainhash | src/experiment_cA5_dt_th_al_ah_bl.py | 1 | 1966 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 5, delta, theta, alpha low, alpha high, beta low, batch size = 5 and
balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
experiment_name = 'cA_5_dt-th-a_l-a_h-b_l' # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = 5
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = 5
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
| gpl-3.0 | 2,978,737,678,313,298,000 | 31.229508 | 124 | 0.639369 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/horovod/horovod/tensorflow/compression.py | 1 | 3057 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2017 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient compression algorithms."""
from enum import Enum
from functools import partial
import tensorflow as tf
class NoneCompression(object):
"""Default no-op compression."""
__instance = None
def __init__(self):
if NoneCompression.__instance is not None:
raise Exception("NoneCompression is a singleton")
else:
NoneCompression.__instance = self
def compress(self, tensor):
"""Returns the tensor unmodified."""
return tensor
def decompress(self, tensor):
"""Returns the tensor unmodified."""
return tensor
@staticmethod
def instance():
"""Returns the singleton instance."""
if NoneCompression.__instance is None:
NoneCompression()
return NoneCompression.__instance
class FP16Compression(object):
"""Compress all floating point gradients to 16-bit."""
def __init__(self, dtype):
"""Compresses tensors of the given dtype, and decompresses back."""
self._dtype = dtype
def compress(self, tensor):
"""Downcasts the tensor to 16-bit."""
if tensor.dtype != self._dtype:
raise ValueError('expected tensor of type %s but given %s' %
(str(self._dtype), str(tensor.dtype)))
tensor_compressed = tensor
if self._dtype.is_floating:
# Only allow compression from other floating point types
tensor_compressed = tf.cast(tensor, dtype=tf.float16)
return tensor_compressed
def decompress(self, tensor):
"""Upcasts the tensor to the dtype of the last compressed tensor."""
tensor_decompressed = tensor
if self._dtype.is_floating:
tensor_decompressed = tf.cast(tensor, dtype=self._dtype)
return tensor_decompressed
class Compression(Enum):
"""Optional gradient compression algorithm used during allreduce."""
"""Do not compress the gradients. This is the default."""
none = partial(lambda dtype: NoneCompression.instance())
"""Compress all floating point gradients to 16-bit."""
fp16 = partial(lambda dtype: FP16Compression(dtype))
def get_compressor(self, dtype):
"""Returns a new compressor instance for the given dtype."""
return self.value(dtype)
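# Hedged usage sketch (assumes a TensorFlow graph/session context; the gradient
# tensor name is illustrative):
#   compressor = Compression.fp16.get_compressor(tf.float32)
#   wire_tensor = compressor.compress(grad)         # float32 -> float16
#   restored = compressor.decompress(wire_tensor)   # back to float32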
| apache-2.0 | -451,647,372,317,546,940 | 34.546512 | 80 | 0.649984 | false |
ngageoint/scale | scale/scheduler/test/scheduling/test_manager.py | 1 | 22876 | from __future__ import absolute_import
from __future__ import unicode_literals
import django
from django.test import TestCase
from django.utils.timezone import now
from mock import MagicMock, patch
from error.models import reset_error_cache
from job.execution.manager import job_exe_mgr
from job.models import JobExecution
from job.test import utils as job_test_utils
from node.models import Node
from node.resources.node_resources import NodeResources
from node.resources.resource import Cpus, Disk, Mem
from queue.models import Queue
from queue.test import utils as queue_test_utils
from scheduler.cleanup.manager import cleanup_mgr
from scheduler.manager import scheduler_mgr
from scheduler.models import Scheduler, ClusterResources
from scheduler.node.agent import Agent
from scheduler.node.manager import node_mgr
from scheduler.resources.manager import resource_mgr
from scheduler.resources.offer import ResourceOffer
from scheduler.scheduling.manager import SchedulingManager
from scheduler.sync.job_type_manager import job_type_mgr
from scheduler.tasks.manager import system_task_mgr
class TestSchedulingManager(TestCase):
fixtures = ['basic_job_errors.json']
def setUp(self):
django.setup()
reset_error_cache()
self.framework_id = '1234'
Scheduler.objects.initialize_scheduler()
Scheduler.objects.update(num_message_handlers=0) # Prevent message handler tasks from scheduling
self._client = MagicMock()
scheduler_mgr.sync_with_database()
scheduler_mgr.update_from_mesos(framework_id=self.framework_id)
resource_mgr.clear()
job_exe_mgr.clear()
self.agent_1 = Agent('agent_1', 'host_1')
self.agent_2 = Agent('agent_2', 'host_2')
self.agent_3 = Agent('agent_3', 'host_2')
node_mgr.clear()
node_mgr.register_agents([self.agent_1, self.agent_2])
node_mgr.sync_with_database(scheduler_mgr.config)
# Ignore initial cleanup, health check, and image pull tasks
for node in node_mgr.get_nodes():
node._last_health_task = now()
node._initial_cleanup_completed()
node._is_image_pulled = True
node._update_state()
if node.agent_id == 'agent_1':
self.node_1_id = node.id
cleanup_mgr.update_nodes(node_mgr.get_nodes())
self.node_1 = Node.objects.get(id=self.node_1_id)
# Ignore system tasks
system_task_mgr._is_db_update_completed = True
self.queue_1 = queue_test_utils.create_queue(cpus_required=4.0, mem_required=1024.0, disk_in_required=100.0,
disk_out_required=200.0, disk_total_required=300.0)
self.queue_2 = queue_test_utils.create_queue(cpus_required=8.0, mem_required=512.0, disk_in_required=400.0,
disk_out_required=45.0, disk_total_required=445.0)
self.queue_large = queue_test_utils.create_queue(resources=NodeResources([Cpus(125.0), Mem(12048.0), Disk(12048.0)]))
job_type_mgr.sync_with_database()
def test_successful_schedule(self):
"""Tests successfully calling perform_scheduling()"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 2) # Schedule smaller queued job executions
# Ensure job execution models are created and queue models are deleted
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 1)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 1)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_large.job_id).count(), 0)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 0)
def test_increased_resources(self):
"""Tests calling perform_scheduling() with more resources"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(225.0), Mem(22048.0), Disk(22048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 3) # Schedule all queued job executions
# Ensure job execution models are created and queue models are deleted
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 1)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 1)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_large.job_id).count(), 1)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id, self.queue_large.id]).count(), 0)
def test_node_with_new_agent_id(self):
"""Tests successfully calling perform_scheduling() when a node get a new agent ID"""
# Host 2 gets new agent ID of agent_3
node_mgr.lost_node(self.agent_2)
node_mgr.register_agents([self.agent_3])
node_mgr.sync_with_database(scheduler_mgr.config)
offer = ResourceOffer('offer', self.agent_3.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer])
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 2) # Schedule both queued job executions
# Check that created tasks have the correct agent ID
calls = self._client.method_calls
# One for checking for driver and second for task launch
self.assertEqual(2, len(calls))
        # Get tasks off 2nd call (index 1); the task list is its second positional argument
mesos_tasks = calls[1][1][1]
for mesos_task in mesos_tasks:
self.assertEqual(self.agent_3.agent_id, mesos_task['agent_id']['value'])
def test_paused_scheduler(self):
"""Tests calling perform_scheduling() with a paused scheduler"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
Scheduler.objects.update(is_paused=True)
scheduler_mgr.sync_with_database()
node_mgr.sync_with_database(scheduler_mgr.config) # Updates nodes with paused scheduler
system_task_mgr._is_db_update_completed = False # Make sure system tasks don't get scheduled
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 0)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 2)
def test_missing_job_types(self):
"""Tests calling perform_scheduling() when a queued job type has not been synced to the scheduler"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
scheduling_manager = SchedulingManager()
# Clear out job type manager for scheduling
with patch('scheduler.scheduling.manager.job_type_mgr.get_job_types') as mock_get_job_types:
mock_get_job_types.return_value = {}
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
# Nothing should be scheduled
self.assertEqual(num_tasks, 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 0)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 2)
def test_missing_workspace(self):
"""Tests calling perform_scheduling() when a queued job's workspace has not been synced to the scheduler"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
# Add workspaces to the queued jobs
queue_1 = Queue.objects.get(id=self.queue_1.id)
config = queue_1.get_execution_configuration()
config.set_output_workspaces({'my_output': 'my_workspace'})
queue_1.configuration = config.get_dict()
queue_1.save()
queue_2 = Queue.objects.get(id=self.queue_2.id)
config = queue_2.get_execution_configuration()
config.set_output_workspaces({'my_output': 'my_workspace'})
queue_2.configuration = config.get_dict()
queue_2.save()
scheduling_manager = SchedulingManager()
# Clear out workspace manager for scheduling
with patch('scheduler.scheduling.manager.workspace_mgr.get_workspaces') as mock_get_workspaces:
mock_get_workspaces.return_value = {}
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
# Nothing should be scheduled
self.assertEqual(num_tasks, 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 0)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 2)
def test_paused_job_type(self):
"""Tests calling perform_scheduling() when a job type is paused"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
self.queue_1.job_type.is_paused = True
self.queue_1.job_type.save()
job_type_mgr.sync_with_database()
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 1) # Schedule queued job execution that is not paused
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 0)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 1)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 1)
def test_job_type_limit(self):
"""Tests calling perform_scheduling() with a job type limit"""
Queue.objects.all().delete()
job_type_with_limit = job_test_utils.create_seed_job_type()
job_type_with_limit.max_scheduled = 4
job_type_with_limit.save()
running_job_exe_1 = job_test_utils.create_running_job_exe(agent_id=self.agent_1.agent_id,
job_type=job_type_with_limit, node=self.node_1)
queue_test_utils.create_queue(job_type=job_type_with_limit)
queue_test_utils.create_queue(job_type=job_type_with_limit)
queue_test_utils.create_queue(job_type=job_type_with_limit)
queue_test_utils.create_queue(job_type=job_type_with_limit)
queue_test_utils.create_queue(job_type=job_type_with_limit)
queue_test_utils.create_queue(job_type=job_type_with_limit)
job_type_mgr.sync_with_database()
# One job of this type is already running
job_exe_mgr.schedule_job_exes([running_job_exe_1], [])
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(0.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 3) # One is already running, should only be able to schedule 3 more
def test_canceled_queue_model(self):
"""Tests successfully calling perform_scheduling() when a queue model has been canceled"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
self.queue_1.is_canceled = True
self.queue_1.save()
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 1) # Scheduled non-canceled queued job execution
# queue_1 should be canceled, queue_2 should be running, queue should be empty now
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_1.job_id).count(), 1)
self.assertEqual(JobExecution.objects.filter(job_id=self.queue_2.job_id).count(), 1)
self.assertEqual(Queue.objects.filter(id__in=[self.queue_1.id, self.queue_2.id]).count(), 0)
# Job execution manager should have a message for the canceled job execution
messages = job_exe_mgr.get_messages()
found_job_exe_end_message = False
for message in messages:
if message.type == 'create_job_exe_ends':
found_job_exe_end_message = True
self.assertTrue(found_job_exe_end_message)
def test_schedule_system_tasks(self):
"""Tests successfully calling perform_scheduling() when scheduling system tasks"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(1024.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2])
# Clear the queue
Queue.objects.all().delete()
# Set us up to schedule a database update task
system_task_mgr._is_db_update_completed = False
# Set us up to schedule 2 message handler tasks
Scheduler.objects.update(num_message_handlers=2)
scheduler_mgr.sync_with_database()
scheduling_manager = SchedulingManager()
num_tasks = scheduling_manager.perform_scheduling(self._client, now())
self.assertEqual(num_tasks, 3) # Schedule database update task and 2 message handler tasks
def test_max_resources(self):
"""Tests successfully calculating the max resources in a cluster"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(22048.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
offer_3 = ResourceOffer('offer_3', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(225.0), Mem(1024.0), Disk(22048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2, offer_3])
resource_mgr.refresh_agent_resources([], now())
max = resource_mgr.get_max_available_resources()
self.assertTrue(max.is_equal(NodeResources([Cpus(250.0), Mem(22048.0), Disk(24096.0)])))
def test_all_available_resources(self):
"""Tests successfully calculating the available resources in a cluster"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(22048.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
offer_3 = ResourceOffer('offer_3', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(225.0), Mem(1024.0), Disk(22048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2, offer_3])
resource_mgr.refresh_agent_resources([], now())
all_available_resources = resource_mgr.get_all_available_resources()
self.assertDictEqual(all_available_resources, {'mem': 25120.0, 'gpus': 0.0, 'disk': 25120.0, 'cpus': 252.0})
def test_update_all_cluster_resources_no_resources(self):
"""Tests updating the all cluster resources database when none are defined."""
resource_mgr.refresh_agent_resources([], now())
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
resource_mgr.update_all_cluster_resources()
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
def test_update_all_cluster_resources(self):
"""Tests successfully updating the all cluster resources database in a cluster"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(22048.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
offer_3 = ResourceOffer('offer_3', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(225.0), Mem(1024.0), Disk(22048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2, offer_3])
resource_mgr.refresh_agent_resources([], now())
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
resource_mgr.update_all_cluster_resources()
resource_db = ClusterResources.objects.first()
self.assertIsNotNone(resource_db)
self.assertEqual(resource_db.mem, 25120.0)
self.assertEqual(resource_db.gpus, 0.0)
self.assertEqual(resource_db.disk, 25120.0)
self.assertEqual(resource_db.cpus, 252.0)
def test_get_queued_resources(self):
"""Tests successfully getting queued resource information"""
offer_1 = ResourceOffer('offer_1', self.agent_1.agent_id, self.framework_id,
NodeResources([Cpus(2.0), Mem(22048.0), Disk(1024.0)]), now(), None)
offer_2 = ResourceOffer('offer_2', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(25.0), Mem(2048.0), Disk(2048.0)]), now(), None)
offer_3 = ResourceOffer('offer_3', self.agent_2.agent_id, self.framework_id,
NodeResources([Cpus(225.0), Mem(1024.0), Disk(22048.0)]), now(), None)
resource_mgr.add_new_offers([offer_1, offer_2, offer_3])
resource_mgr.refresh_agent_resources([], now())
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
resource_mgr.update_all_cluster_resources()
resource_db = ClusterResources.objects.first()
self.assertIsNotNone(resource_db)
self.assertEqual(resource_db.mem, 25120.0)
self.assertEqual(resource_db.gpus, 0.0)
self.assertEqual(resource_db.disk, 25120.0)
self.assertEqual(resource_db.cpus, 252.0)
queued_resources = resource_mgr.get_queued_resources()
self.assertDictEqual(queued_resources, {
"cluster_resources": {'cpus': 252,'disk': 25120, 'gpus': 0, 'mem': 25120},
"queue_lengths": {'PENDING': 0, 'QUEUED': 3, 'RUNNING': 0},
"total_resources": {'PENDING': {}, 'QUEUED': {'cpus': 3.0, 'mem': 384.0}, 'RUNNING': {}}
})
def test_get_queued_resources_with_no_resources(self):
"""Tests successfully getting queued resource information when all cluster resources is empty."""
resource_mgr.refresh_agent_resources([], now())
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
resource_mgr.update_all_cluster_resources()
resource_db = ClusterResources.objects.first()
self.assertIsNone(resource_db)
queued_resources = resource_mgr.get_queued_resources()
self.assertDictEqual(queued_resources, {
"cluster_resources": {},
"queue_lengths": {'PENDING': 0, 'QUEUED': 3, 'RUNNING': 0},
"total_resources": {'PENDING': {}, 'QUEUED': {'cpus': 3.0, 'mem': 384.0}, 'RUNNING': {}}
})
| apache-2.0 | -8,596,555,282,675,143,000 | 52.57377 | 125 | 0.634027 | false |
fuzeman/plex.py | plex/objects/library/metadata/clip.py | 1 | 1956 | from plex.objects.core.base import Property
from plex.objects.library.metadata.base import Metadata
from plex.objects.library.metadata.photo import PhotoAlbum
from plex.objects.library.video import Video
class Clip(Video, Metadata):
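    # Descriptive note: Clip is a video metadata item; its grandparent/parent
    # properties are resolved through the construct_* helpers below, which build
    # PhotoAlbum objects from the grandparent*/parent* attributes of the source node.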
grandparent = Property(resolver=lambda: Clip.construct_grandparent)
parent = Property(resolver=lambda: Clip.construct_parent)
extra_type = Property('extraType', int)
index = Property(type=int)
filename = Property
device = Property
def __repr__(self):
if self.grandparent and self.parent:
return '<Clip %r - %r - %r>' % (
self.grandparent.title,
self.parent.title,
self.title
)
if self.grandparent:
return '<Clip %r - %r>' % (
self.grandparent.title,
self.title
)
if self.parent:
return '<Clip %r - %r>' % (
self.parent.title,
self.title
)
return '<Clip %r>' % self.title
@staticmethod
def construct_grandparent(client, node):
attribute_map = {
'key': 'grandparentKey',
'ratingKey': 'grandparentRatingKey',
'index': 'grandparentIndex',
'title': 'grandparentTitle',
'art': 'grandparentArt',
'thumb': 'grandparentThumb'
}
return PhotoAlbum.construct(client, node, attribute_map, child=True)
@staticmethod
def construct_parent(client, node):
attribute_map = {
'key': 'parentKey',
'ratingKey': 'parentRatingKey',
'index': 'parentIndex',
'title': 'parentTitle',
'art': 'parentArt',
'thumb': 'parentThumb'
}
return PhotoAlbum.construct(client, node, attribute_map, child=True)
| mit | 7,442,453,030,430,962,000 | 27.764706 | 76 | 0.534765 | false |
hnlaomie/python-tools | util/file/build_report.py | 1 | 2895 | # -*- coding: utf-8 -*-
import os, sys, csv
def get_order_list(order_file: str) -> list:
order_list = []
with open(order_file, "r") as csv_input:
reader = csv.reader(csv_input)
for row in reader:
order_list.append(row[0])
return order_list
def save_report(data: list, order_id: str, out_path: str, report_file: str):
order_file = report_file.replace("orderId", order_id)
out_file = os.path.join(out_path, order_file)
with open(out_file, "w") as csv_output:
writer = csv.writer(csv_output, lineterminator='\n')
writer.writerows(data)
def build_report(order_file: str, csv_file: str, out_path: str):
# used order map
used_order_map = {}
# row data list
row_list = []
report_file = os.path.basename(csv_file)
pre_order_id = None
# header on first line
is_header = True
header = None
with open(csv_file, "r") as csv_input:
reader = csv.reader(csv_input)
for row in reader:
if (len(row) > 0):
if is_header:
# save first line to header, first column is order_id
header = row[1:]
is_header = False
else:
order_id = row[0]
if pre_order_id == None:
pre_order_id = order_id
                    # save data to file when the line changes to the next order_id
if order_id != pre_order_id:
row_list.insert(0, header)
used_order_map[pre_order_id] = pre_order_id
save_report(row_list, pre_order_id, out_path, report_file)
row_list.clear()
pre_order_id = order_id
row_list.append(row[1:])
if pre_order_id != None:
row_list.insert(0, header)
used_order_map[pre_order_id] = pre_order_id
save_report(row_list, pre_order_id, out_path, report_file)
# save empty report with header
row_list.clear()
row_list.append(header)
order_list = get_order_list(order_file)
for order_id in order_list:
if (used_order_map.get(order_id) == None):
save_report(row_list, order_id, out_path, report_file)
"""
usage: python build_report.py [order_file] [csv_file] [out_path]
read data from csv_file, group it by order_id and output multiple reports to out_path.
if an order has no data, output an empty report containing only the header.
order_file: CSV file listing the orderIds to report on (first column, one per row)
csv_file: first column is "orderId"
out_path: output directory for the reports
"""
if __name__ == '__main__':
if (len(sys.argv) > 3):
order_file = sys.argv[1]
csv_file = sys.argv[2]
out_path = sys.argv[3]
build_report(order_file, csv_file, out_path)
else:
print("usage: python build_report.py [order_file] [csv_file] [out_path]") | mit | -8,762,075,644,433,950,000 | 32.674419 | 84 | 0.559585 | false |
danielquinn/paperless | src/paperless/settings.py | 1 | 11187 | """
Django settings for paperless project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import json
import os
import re
from dotenv import load_dotenv
# Tap paperless.conf if it's available
if os.path.exists("/etc/paperless.conf"):
load_dotenv("/etc/paperless.conf")
elif os.path.exists("/usr/local/etc/paperless.conf"):
load_dotenv("/usr/local/etc/paperless.conf")
def __get_boolean(key, default="NO"):
"""
Return a boolean value based on whatever the user has supplied in the
environment based on whether the value "looks like" it's True or not.
"""
return bool(os.getenv(key, default).lower() in ("yes", "y", "1", "t", "true"))
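# For illustration (hypothetical values): __get_boolean("PAPERLESS_DEBUG", "NO")
# returns True when the variable is set to "yes", "y", "1", "t" or "true" in any
# case (e.g. PAPERLESS_DEBUG=TRUE), and False for anything else.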
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# The secret key has a default that should be fine so long as you're hosting
# Paperless on a closed network. However, if you're putting this anywhere
# public, you should change the key to something unique and verbose.
SECRET_KEY = os.getenv(
"PAPERLESS_SECRET_KEY",
"e11fl1oa-*ytql8p)(06fbj4ukrlo+n7k&q5+$1md7i+mge=ee"
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = __get_boolean("PAPERLESS_DEBUG", "YES")
LOGIN_URL = "admin:login"
ALLOWED_HOSTS = ["*"]
_allowed_hosts = os.getenv("PAPERLESS_ALLOWED_HOSTS")
if _allowed_hosts:
ALLOWED_HOSTS = _allowed_hosts.split(",")
FORCE_SCRIPT_NAME = os.getenv("PAPERLESS_FORCE_SCRIPT_NAME")
# Application definition
INSTALLED_APPS = [
"whitenoise.runserver_nostatic",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"corsheaders",
"django_extensions",
"paperless",
"documents.apps.DocumentsConfig",
"reminders.apps.RemindersConfig",
"paperless_tesseract.apps.PaperlessTesseractConfig",
"paperless_text.apps.PaperlessTextConfig",
"django.contrib.admin",
"rest_framework",
"crispy_forms",
"django_filters",
"djangoql",
]
if os.getenv("PAPERLESS_INSTALLED_APPS"):
INSTALLED_APPS += os.getenv("PAPERLESS_INSTALLED_APPS").split(",")
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Enable whitenoise compression and caching
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# We allow CORS from localhost:8080
CORS_ORIGIN_WHITELIST = tuple(os.getenv("PAPERLESS_CORS_ALLOWED_HOSTS", "http://localhost:8080,https://localhost:8080").split(","))
# If auth is disabled, we just use our "bypass" authentication middleware
if bool(os.getenv("PAPERLESS_DISABLE_LOGIN", "false").lower() in ("yes", "y", "1", "t", "true")):
_index = MIDDLEWARE.index("django.contrib.auth.middleware.AuthenticationMiddleware")
MIDDLEWARE[_index] = "paperless.middleware.Middleware"
ROOT_URLCONF = 'paperless.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paperless.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(
os.getenv(
"PAPERLESS_DBDIR",
os.path.join(BASE_DIR, "..", "data")
),
"db.sqlite3"
)
}
}
if os.getenv("PAPERLESS_DBUSER"):
DATABASES["default"] = {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": os.getenv("PAPERLESS_DBNAME", "paperless"),
"USER": os.getenv("PAPERLESS_DBUSER"),
}
if os.getenv("PAPERLESS_DBPASS"):
DATABASES["default"]["PASSWORD"] = os.getenv("PAPERLESS_DBPASS")
if os.getenv("PAPERLESS_DBHOST"):
DATABASES["default"]["HOST"] = os.getenv("PAPERLESS_DBHOST")
if os.getenv("PAPERLESS_DBPORT"):
DATABASES["default"]["PORT"] = os.getenv("PAPERLESS_DBPORT")
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.getenv("PAPERLESS_TIME_ZONE", "UTC")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = os.getenv(
"PAPERLESS_STATICDIR", os.path.join(BASE_DIR, "..", "static"))
MEDIA_ROOT = os.getenv(
"PAPERLESS_MEDIADIR", os.path.join(BASE_DIR, "..", "media"))
STATIC_URL = os.getenv("PAPERLESS_STATIC_URL", "/static/")
MEDIA_URL = os.getenv("PAPERLESS_MEDIA_URL", "/media/")
# Other
# Disable Django's artificial limit on the number of form fields to submit at
# once. This is a protection against overloading the server, but since this is
# a self-hosted sort of gig, the benefits of being able to mass-delete a tonne
# of log entries outweigh the benefits of such a safeguard.
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Paperless-specific stuff
# You shouldn't have to edit any of these values. Rather, you can set these
# values in /etc/paperless.conf instead.
# ----------------------------------------------------------------------------
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"consumer": {
"class": "documents.loggers.PaperlessLogger",
}
},
"loggers": {
"documents": {
"handlers": ["consumer"],
"level": os.getenv("PAPERLESS_CONSUMER_LOG_LEVEL", "INFO"),
},
},
}
# The default language that tesseract will attempt to use when parsing
# documents. It should be a 3-letter language code consistent with ISO 639.
OCR_LANGUAGE = os.getenv("PAPERLESS_OCR_LANGUAGE", "eng")
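# For example, "deu" or "fra" would select German or French respectively
# (assuming the matching tesseract language pack is installed).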
# The amount of threads to use for OCR
OCR_THREADS = os.getenv("PAPERLESS_OCR_THREADS")
# OCR all documents?
OCR_ALWAYS = __get_boolean("PAPERLESS_OCR_ALWAYS")
# If this is true, any failed attempts to OCR a PDF will result in the PDF
# being indexed anyway, with whatever we could get. If it's False, the file
# will simply be left in the CONSUMPTION_DIR.
FORGIVING_OCR = __get_boolean("PAPERLESS_FORGIVING_OCR")
# GNUPG needs a home directory for some reason
GNUPG_HOME = os.getenv("HOME", "/tmp")
# Convert is part of the ImageMagick package
CONVERT_BINARY = os.getenv("PAPERLESS_CONVERT_BINARY", "convert")
CONVERT_TMPDIR = os.getenv("PAPERLESS_CONVERT_TMPDIR")
CONVERT_MEMORY_LIMIT = os.getenv("PAPERLESS_CONVERT_MEMORY_LIMIT")
CONVERT_DENSITY = os.getenv("PAPERLESS_CONVERT_DENSITY")
# Ghostscript
GS_BINARY = os.getenv("PAPERLESS_GS_BINARY", "gs")
# OptiPNG
OPTIPNG_BINARY = os.getenv("PAPERLESS_OPTIPNG_BINARY", "optipng")
# Unpaper
UNPAPER_BINARY = os.getenv("PAPERLESS_UNPAPER_BINARY", "unpaper")
# This will be created if it doesn't exist
SCRATCH_DIR = os.getenv("PAPERLESS_SCRATCH_DIR", "/tmp/paperless")
# This is where Paperless will look for PDFs to index
CONSUMPTION_DIR = os.getenv("PAPERLESS_CONSUMPTION_DIR")
# (This setting is ignored on Linux where inotify is used instead of a
# polling loop.)
# The number of seconds that Paperless will wait between checking
# CONSUMPTION_DIR. If you tend to write documents to this directory very
# slowly, you may want to use a higher value than the default.
CONSUMER_LOOP_TIME = int(os.getenv("PAPERLESS_CONSUMER_LOOP_TIME", 10))
# Pre-2.x versions of Paperless stored your documents locally with GPG
# encryption, but that is no longer the default. This behaviour is still
# available, but it must be explicitly enabled by setting
# `PAPERLESS_PASSPHRASE` in your environment or config file. The default is to
# store these files unencrypted.
#
# Translation:
# * If you're a new user, you can safely ignore this setting.
# * If you're upgrading from 1.x, this must be set, OR you can run
# `./manage.py change_storage_type gpg unencrypted` to decrypt your files,
# after which you can unset this value.
PASSPHRASE = os.getenv("PAPERLESS_PASSPHRASE")
# Trigger a script after every successful document consumption?
PRE_CONSUME_SCRIPT = os.getenv("PAPERLESS_PRE_CONSUME_SCRIPT")
POST_CONSUME_SCRIPT = os.getenv("PAPERLESS_POST_CONSUME_SCRIPT")
# Whether to display a selected document inline, or download it as attachment:
INLINE_DOC = __get_boolean("PAPERLESS_INLINE_DOC")
# The number of items on each page in the web UI. This value must be a
# positive integer, but if you don't define one in paperless.conf, a default of
# 100 will be used.
PAPERLESS_LIST_PER_PAGE = int(os.getenv("PAPERLESS_LIST_PER_PAGE", 100))
FY_START = os.getenv("PAPERLESS_FINANCIAL_YEAR_START")
FY_END = os.getenv("PAPERLESS_FINANCIAL_YEAR_END")
# Specify the default date order (for autodetected dates)
DATE_ORDER = os.getenv("PAPERLESS_DATE_ORDER", "DMY")
FILENAME_DATE_ORDER = os.getenv("PAPERLESS_FILENAME_DATE_ORDER")
# Transformations applied before filename parsing
FILENAME_PARSE_TRANSFORMS = []
for t in json.loads(os.getenv("PAPERLESS_FILENAME_PARSE_TRANSFORMS", "[]")):
FILENAME_PARSE_TRANSFORMS.append((re.compile(t["pattern"]), t["repl"]))
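# A minimal sketch of the expected environment value (hypothetical pattern):
# PAPERLESS_FILENAME_PARSE_TRANSFORMS=[{"pattern": "^scan_", "repl": ""}]
# i.e. a JSON list of objects whose "pattern" is compiled as a regex and whose
# "repl" is the replacement string.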
# Specify for how many years a correspondent is considered recent. Recent
# correspondents will be shown in a separate "Recent correspondents" filter as
# well. Set to 0 to disable this filter.
PAPERLESS_RECENT_CORRESPONDENT_YEARS = int(os.getenv(
"PAPERLESS_RECENT_CORRESPONDENT_YEARS", 0))
# Specify the filename format for out files
PAPERLESS_FILENAME_FORMAT = os.getenv("PAPERLESS_FILENAME_FORMAT")
| gpl-3.0 | -3,477,589,117,054,092,300 | 32 | 131 | 0.699026 | false |
rbuffat/pyidf | tests/test_externalinterfacefunctionalmockupunitexporttoschedule.py | 1 | 1925 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.external_interface import ExternalInterfaceFunctionalMockupUnitExportToSchedule
log = logging.getLogger(__name__)
class TestExternalInterfaceFunctionalMockupUnitExportToSchedule(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_externalinterfacefunctionalmockupunitexporttoschedule(self):
pyidf.validation_level = ValidationLevel.error
obj = ExternalInterfaceFunctionalMockupUnitExportToSchedule()
# alpha
var_schedule_name = "Schedule Name"
obj.schedule_name = var_schedule_name
# object-list
var_schedule_type_limits_names = "object-list|Schedule Type Limits Names"
obj.schedule_type_limits_names = var_schedule_type_limits_names
# alpha
var_fmu_variable_name = "FMU Variable Name"
obj.fmu_variable_name = var_fmu_variable_name
# real
var_initial_value = 4.4
obj.initial_value = var_initial_value
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].schedule_name, var_schedule_name)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].schedule_type_limits_names, var_schedule_type_limits_names)
self.assertEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].fmu_variable_name, var_fmu_variable_name)
self.assertAlmostEqual(idf2.externalinterfacefunctionalmockupunitexporttoschedules[0].initial_value, var_initial_value) | apache-2.0 | 7,666,285,236,414,696,000 | 37.52 | 147 | 0.720519 | false |
facebookexperimental/eden | eden/hg-server/tests/test-command-template-t.py | 1 | 121759 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# isort:skip_file
from __future__ import absolute_import
import datetime
import os
import sys
import warnings
from edenscm.mercurial import namespaces
from edenscm.mercurial import pycompat
from testutil.autofix import eq
from testutil.dott import feature, sh, testtmp # noqa: F401
is_py3 = sys.version_info[0] >= 3
sh % "setconfig 'extensions.treemanifest=!'"
sh % "setconfig 'ui.allowemptycommit=1'"
sh % "hg init a"
sh % "cd a"
sh % "echo a" > "a"
sh % "hg add a"
sh % "echo line 1" > "b"
sh % "echo line 2" >> "b"
sh % "hg commit -l b -d '1000000 0' -u 'User Name <user@hostname>'"
sh % "hg add b"
sh % "echo other 1" > "c"
sh % "echo other 2" >> "c"
sh % "echo" >> "c"
sh % "echo other 3" >> "c"
sh % "hg commit -l c -d '1100000 0' -u 'A. N. Other <other@place>'"
sh % "hg add c"
sh % "hg commit -m 'no person' -d '1200000 0' -u 'other@place'"
sh % "echo c" >> "c"
sh % "hg commit -m 'no user, no domain' -d '1300000 0' -u person"
sh % "hg commit -m 'new branch' -d '1400000 0' -u person"
sh % "hg bookmark foo"
sh % "hg co -q 3"
sh % "echo other 4" >> "d"
sh % "hg add d"
sh % "hg commit -m 'new head' -d '1500000 0' -u person"
sh % "hg merge -q foo"
sh % "hg commit -m merge -d '1500001 0' -u person"
sh % "hg log -r . -T '{username}'" == "test"
# Test arithmetic operators have the right precedence:
sh % 'hg log -l 1 -T \'{date(date, "%Y") + 5 * 10} {date(date, "%Y") - 2 * 3}\\n\'' == "2020 1964"
sh % 'hg log -l 1 -T \'{date(date, "%Y") * 5 + 10} {date(date, "%Y") * 3 - 2}\\n\'' == "9860 5908"
# Test division:
sh % "hg debugtemplate -r0 -v '{5 / 2} {mod(5, 2)}\\n'" == r"""
(template
(/
(integer '5')
(integer '2'))
(string ' ')
(func
(symbol 'mod')
(list
(integer '5')
(integer '2')))
(string '\n'))
2 1"""
sh % "hg debugtemplate -r0 -v '{5 / -2} {mod(5, -2)}\\n'" == r"""
(template
(/
(integer '5')
(negate
(integer '2')))
(string ' ')
(func
(symbol 'mod')
(list
(integer '5')
(negate
(integer '2'))))
(string '\n'))
-3 -1"""
sh % "hg debugtemplate -r0 -v '{-5 / 2} {mod(-5, 2)}\\n'" == r"""
(template
(/
(negate
(integer '5'))
(integer '2'))
(string ' ')
(func
(symbol 'mod')
(list
(negate
(integer '5'))
(integer '2')))
(string '\n'))
-3 1"""
sh % "hg debugtemplate -r0 -v '{-5 / -2} {mod(-5, -2)}\\n'" == r"""
(template
(/
(negate
(integer '5'))
(negate
(integer '2')))
(string ' ')
(func
(symbol 'mod')
(list
(negate
(integer '5'))
(negate
(integer '2'))))
(string '\n'))
2 -1"""
# Filters bind closer than arithmetic:
sh % "hg debugtemplate -r0 -v '{revset(\".\")|count - 1}\\n'" == r"""
(template
(-
(|
(func
(symbol 'revset')
(string '.'))
(symbol 'count'))
(integer '1'))
(string '\n'))
0"""
# But negate binds closer still:
sh % "hg debugtemplate -r0 -v '{1-3|stringify}\\n'" == r"""
(template
(-
(integer '1')
(|
(integer '3')
(symbol 'stringify')))
(string '\n'))
hg: parse error: arithmetic only defined on integers
[255]"""
sh % "hg debugtemplate -r0 -v '{-3|stringify}\\n'" == r"""
(template
(|
(negate
(integer '3'))
(symbol 'stringify'))
(string '\n'))
-3"""
# Filters bind as close as map operator:
sh % "hg debugtemplate -r0 -v '{desc|splitlines % \"{line}\\n\"}'" == r"""
(template
(%
(|
(symbol 'desc')
(symbol 'splitlines'))
(template
(symbol 'line')
(string '\n'))))
line 1
line 2"""
# Keyword arguments:
sh % "hg debugtemplate -r0 -v '{foo=bar|baz}'" == r"""
(template
(keyvalue
(symbol 'foo')
(|
(symbol 'bar')
(symbol 'baz'))))
hg: parse error: can't use a key-value pair in this context
[255]"""
sh % "hg debugtemplate '{pad(\"foo\", width=10, left=true)}\\n'" == " foo"
# Call function which takes named arguments by filter syntax:
sh % "hg debugtemplate '{\" \"|separate}'"
sh % 'hg debugtemplate \'{("not", "an", "argument", "list")|separate}\'' == r"""
hg: parse error: unknown method 'list'
[255]"""
# Second branch starting at nullrev:
sh % "hg update null" == "0 files updated, 0 files merged, 4 files removed, 0 files unresolved"
with open("second", "wb") as f:
# Valid utf-8 character
if is_py3:
f.write("🥈".encode("utf-8"))
else:
f.write("🥈")
# Invalid utf-8 character
f.write(b"\xe2\x28\xa1")
f.write(b"\n")
sh % "hg add second"
sh % "hg commit -m second -d '1000000 0' -u 'User Name <user@hostname>'"
sh % "echo third" > "third"
sh % "hg add third"
sh % "hg mv second fourth"
sh % "hg commit -m third -d '2020-01-01 10:01 UTC'"
sh % "hg log --template '{join(file_copies, \",\\n\")}\\n' -r ." == "fourth (second)"
sh % "hg log -T '{file_copies % \"{source} -> {name}\\n\"}' -r ." == "second -> fourth"
sh % 'hg log -T \'{rev} {ifcontains("fourth", file_copies, "t", "f")}\\n\' -r \'.:7\'' == r"""
8 t
7 f"""
# Working-directory revision has special identifiers, though they are still
# experimental:
sh % "hg log -r 'wdir()' -T '{rev}:{node}\\n'" == "2147483647:ffffffffffffffffffffffffffffffffffffffff"
# Some keywords are invalid for working-directory revision, but they should
# never cause a crash:
sh % "hg log -r 'wdir()' -T '{manifest}\\n'"
# Quoting for ui.logtemplate
sh % "hg tip --config 'ui.logtemplate={rev}\\n'" == "8"
sh % "hg tip --config 'ui.logtemplate='\\''{rev}\\n'\\'''" == "8"
sh % "hg tip --config 'ui.logtemplate=\"{rev}\\n\"'" == "8"
sh % "hg tip --config 'ui.logtemplate=n{rev}\\n'" == "n8"
# Make sure user/global hgrc does not affect tests
sh % "echo '[ui]'" > ".hg/hgrc"
sh % "echo 'logtemplate ='" >> ".hg/hgrc"
sh % "echo 'style ='" >> ".hg/hgrc"
# Add some simple styles to settings
(
sh % "cat"
<< r"""
[templates]
simple = "{rev}\n"
simple2 = {rev}\n
rev = "should not precede {rev} keyword\n"
"""
>> ".hg/hgrc"
)
sh % "hg log -l1 -Tsimple" == "8"
sh % "hg log -l1 -Tsimple2" == "8"
sh % "hg log -l1 -Trev" == "should not precede 8 keyword"
sh % "hg log -l1 -T '{simple}'" == "8"
# Map file shouldn't see user templates:
sh % "cat" << r"""
changeset = 'nothing expanded:{simple}\n'
""" > "tmpl"
sh % "hg log -l1 --style ./tmpl" == "nothing expanded:"
# Test templates and style maps in files:
sh % "echo '{rev}'" > "tmpl"
sh % "hg log -l1 -T./tmpl" == "8"
sh % "hg log -l1 -Tblah/blah" == "blah/blah"
sh % "echo 'changeset = \"{rev}\\n\"'" > "map-simple"
sh % "hg log -l1 -T./map-simple" == "8"
# a map file may have [templates] and [templatealias] sections:
sh % "cat" << r"""
[templates]
changeset = "{a}\n"
[templatealias]
a = rev
""" > "map-simple"
sh % "hg log -l1 -T./map-simple" == "8"
# so it can be included in hgrc
sh % "cat" << r"""
%include map-simple
[templates]
foo = "{changeset}"
""" > "myhgrc"
sh % "'HGRCPATH=./myhgrc' hg log -l1 -Tfoo" == "8"
sh % "'HGRCPATH=./myhgrc' hg log -l1 '-T{a}\\n'" == "8"
# Test template map inheritance
sh % "echo '__base__ = map-cmdline.default'" > "map-simple"
sh % "echo 'cset = \"changeset: ***{rev}***\\n\"'" >> "map-simple"
sh % "hg log -l1 -T./map-simple" == r"""
changeset: ***8***
user: test
date: Wed Jan 01 10:01:00 2020 +0000
summary: third"""
# Test docheader, docfooter and separator in template map
sh % "cat" << r"""
docheader = '\{\n'
docfooter = '\n}\n'
separator = ',\n'
changeset = ' {dict(rev, node|short)|json}'
""" > "map-myjson"
sh % "hg log -l2 -T./map-myjson" == r"""
{
{"node": "209edb6a1848", "rev": 8},
{"node": "88058a185da2", "rev": 7}
}"""
# Test docheader, docfooter and separator in [templates] section
(
sh % "cat"
<< r"""
[templates]
myjson = ' {dict(rev, node|short)|json}'
myjson:docheader = '\{\n'
myjson:docfooter = '\n}\n'
myjson:separator = ',\n'
:docheader = 'should not be selected as a docheader for literal templates\n'
"""
>> ".hg/hgrc"
)
sh % "hg log -l2 -Tmyjson" == r"""
{
{"node": "209edb6a1848", "rev": 8},
{"node": "88058a185da2", "rev": 7}
}"""
sh % "hg log -l1 '-T{rev}\\n'" == "8"
# Template should precede style option
sh % "hg log -l1 --style default -T '{rev}\\n'" == "8"
# Add a commit with empty description, to ensure that the templates
# below will omit the description line.
sh % "echo c" >> "c"
sh % "hg add c"
sh % "hg commit -qm ' '"
# Remove commit with empty commit message, so as to not pollute further
# tests.
sh % "hg debugstrip -q ."
# Revision with no copies (used to print a traceback):
sh % "hg tip -v --template '\\n'"
# Compact style works:
sh % "hg log -Tcompact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 user
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
[foo] 07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 other
other 1
1e4e1b8f71e0 1970-01-12 13:46 +0000 user
line 1"""
sh % "hg log -v --style compact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 User Name <user@hostname>
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other@place
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 A. N. Other <other@place>
other 1
other 2
other 3
1e4e1b8f71e0 1970-01-12 13:46 +0000 User Name <user@hostname>
line 1
line 2"""
sh % "hg log --debug --style compact" == r"""
209edb6a1848 2020-01-01 10:01 +0000 test
third
88058a185da2 1970-01-12 13:46 +0000 User Name <user@hostname>
second
f7e5795620e7 1970-01-18 08:40 +0000 person
merge
13207e5a10d9 1970-01-18 08:40 +0000 person
new head
07fa1db10648 1970-01-17 04:53 +0000 person
new branch
10e46f2dcbf4 1970-01-16 01:06 +0000 person
no user, no domain
97054abb4ab8 1970-01-14 21:20 +0000 other@place
no person
b608e9d1a3f0 1970-01-13 17:33 +0000 A. N. Other <other@place>
other 1
other 2
other 3
1e4e1b8f71e0 1970-01-12 13:46 +0000 User Name <user@hostname>
line 1
line 2"""
# Test xml styles:
sh % "hg log --style xml -r 'not all()'" == r"""
<?xml version="1.0"?>
<log>
</log>"""
sh % "hg log --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
</logentry>
</log>"""
sh % "hg log -v --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
<paths>
<path action="A">fourth</path>
<path action="A">third</path>
<path action="R">second</path>
</paths>
<copies>
<copy source="second">fourth</copy>
</copies>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
<paths>
<path action="A">second</path>
</paths>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
<paths>
</paths>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
<paths>
<path action="A">d</path>
</paths>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
<paths>
</paths>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
<paths>
<path action="M">c</path>
</paths>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
<paths>
<path action="A">c</path>
</paths>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
<paths>
<path action="A">b</path>
</paths>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
<paths>
<path action="A">a</path>
</paths>
</logentry>
</log>"""
sh % "hg log --debug --style xml" == r"""
<?xml version="1.0"?>
<log>
<logentry node="209edb6a18483c1434e4006bca4c2b1ee5e7090a">
<author email="test">test</author>
<date>2020-01-01T10:01:00+00:00</date>
<msg xml:space="preserve">third</msg>
<paths>
<path action="A">fourth</path>
<path action="A">third</path>
<path action="R">second</path>
</paths>
<copies>
<copy source="second">fourth</copy>
</copies>
<extra key="branch">default</extra>
</logentry>
<logentry node="88058a185da202d22e8ee0bb4d3515ff0ecb222b">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">second</msg>
<paths>
<path action="A">second</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="f7e5795620e78993ad76680c4306bb2da83907b3">
<author email="person">person</author>
<date>1970-01-18T08:40:01+00:00</date>
<msg xml:space="preserve">merge</msg>
<paths>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="13207e5a10d9fd28ec424934298e176197f2c67f">
<author email="person">person</author>
<date>1970-01-18T08:40:00+00:00</date>
<msg xml:space="preserve">new head</msg>
<paths>
<path action="A">d</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="07fa1db1064879a32157227401eb44b322ae53ce">
<bookmark>foo</bookmark>
<author email="person">person</author>
<date>1970-01-17T04:53:20+00:00</date>
<msg xml:space="preserve">new branch</msg>
<paths>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="10e46f2dcbf4823578cf180f33ecf0b957964c47">
<author email="person">person</author>
<date>1970-01-16T01:06:40+00:00</date>
<msg xml:space="preserve">no user, no domain</msg>
<paths>
<path action="M">c</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="97054abb4ab824450e9164180baf491ae0078465">
<author email="other@place">other</author>
<date>1970-01-14T21:20:00+00:00</date>
<msg xml:space="preserve">no person</msg>
<paths>
<path action="A">c</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="b608e9d1a3f0273ccf70fb85fd6866b3482bf965">
<author email="other@place">A. N. Other</author>
<date>1970-01-13T17:33:20+00:00</date>
<msg xml:space="preserve">other 1
other 2
other 3</msg>
<paths>
<path action="A">b</path>
</paths>
<extra key="branch">default</extra>
</logentry>
<logentry node="1e4e1b8f71e05681d422154f5421e385fec3454f">
<author email="user@hostname">User Name</author>
<date>1970-01-12T13:46:40+00:00</date>
<msg xml:space="preserve">line 1
line 2</msg>
<paths>
<path action="A">a</path>
</paths>
<extra key="branch">default</extra>
</logentry>
</log>"""
# Test JSON style:
sh % "hg log -k nosuch -Tjson" == "[]"
sh % "hg log -qr . -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a"
}
]"""
sh % "hg log -vpr . -Tjson --stat" == (
r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"],
"diffstat": " fourth | 1 +\n second | 1 -\n third | 1 +\n 3 files changed, 2 insertions(+), 1 deletions(-)\n","""
+ (
'\n "diff": "diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+🥈\udced\udcb3\udca2(\udced\udcb2\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-🥈\udced\udcb3\udca2(\udced\udcb2\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"\n'
if is_py3
else '\n "diff": "diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+🥈\xed\xb3\xa2(\xed\xb2\xa1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-🥈\xed\xb3\xa2(\xed\xb2\xa1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"\n'
)
+ r""" }
]"""
)
# honor --git but not format-breaking diffopts
sh % "hg --config 'diff.noprefix=True' log --git -vpr . -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"],
"diff": "diff --git a/second b/fourth\nrename from second\nrename to fourth\ndiff --git a/third b/third\nnew file mode 100644\n--- /dev/null\n+++ b/third\n@@ -0,0 +1,1 @@\n+third\n"
}
]"""
sh % "hg log -T json" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"]
},
{
"rev": 7,
"node": "88058a185da202d22e8ee0bb4d3515ff0ecb222b",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "second",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"]
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"]
},
{
"rev": 5,
"node": "13207e5a10d9fd28ec424934298e176197f2c67f",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500000, 0],
"desc": "new head",
"bookmarks": [],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"]
},
{
"rev": 4,
"node": "07fa1db1064879a32157227401eb44b322ae53ce",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1400000, 0],
"desc": "new branch",
"bookmarks": ["foo"],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"]
},
{
"rev": 3,
"node": "10e46f2dcbf4823578cf180f33ecf0b957964c47",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1300000, 0],
"desc": "no user, no domain",
"bookmarks": [],
"parents": ["97054abb4ab824450e9164180baf491ae0078465"]
},
{
"rev": 2,
"node": "97054abb4ab824450e9164180baf491ae0078465",
"branch": "default",
"phase": "draft",
"user": "other@place",
"date": [1200000, 0],
"desc": "no person",
"bookmarks": [],
"parents": ["b608e9d1a3f0273ccf70fb85fd6866b3482bf965"]
},
{
"rev": 1,
"node": "b608e9d1a3f0273ccf70fb85fd6866b3482bf965",
"branch": "default",
"phase": "draft",
"user": "A. N. Other <other@place>",
"date": [1100000, 0],
"desc": "other 1\nother 2\n\nother 3",
"bookmarks": [],
"parents": ["1e4e1b8f71e05681d422154f5421e385fec3454f"]
},
{
"rev": 0,
"node": "1e4e1b8f71e05681d422154f5421e385fec3454f",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "line 1\nline 2",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"]
}
]"""
sh % "hg heads -v -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"files": ["fourth", "second", "third"]
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"],
"files": []
}
]"""
sh % "hg log --debug -Tjson" == r"""
[
{
"rev": 8,
"node": "209edb6a18483c1434e4006bca4c2b1ee5e7090a",
"branch": "default",
"phase": "draft",
"user": "test",
"date": [1577872860, 0],
"desc": "third",
"bookmarks": [],
"parents": ["88058a185da202d22e8ee0bb4d3515ff0ecb222b"],
"manifest": "102f85d6546830d0894e5420cdddaa12fe270c02",
"extra": {"branch": "default"},
"modified": [],
"added": ["fourth", "third"],
"removed": ["second"]
},
{
"rev": 7,
"node": "88058a185da202d22e8ee0bb4d3515ff0ecb222b",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "second",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"],
"manifest": "e3aa144e25d914ea34006bd7b3c266b7eb283c61",
"extra": {"branch": "default"},
"modified": [],
"added": ["second"],
"removed": []
},
{
"rev": 6,
"node": "f7e5795620e78993ad76680c4306bb2da83907b3",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500001, 0],
"desc": "merge",
"bookmarks": [],
"parents": ["13207e5a10d9fd28ec424934298e176197f2c67f", "07fa1db1064879a32157227401eb44b322ae53ce"],
"manifest": "4dc3def4f9b4c6e8de820f6ee74737f91e96a216",
"extra": {"branch": "default"},
"modified": [],
"added": [],
"removed": []
},
{
"rev": 5,
"node": "13207e5a10d9fd28ec424934298e176197f2c67f",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1500000, 0],
"desc": "new head",
"bookmarks": [],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"],
"manifest": "4dc3def4f9b4c6e8de820f6ee74737f91e96a216",
"extra": {"branch": "default"},
"modified": [],
"added": ["d"],
"removed": []
},
{
"rev": 4,
"node": "07fa1db1064879a32157227401eb44b322ae53ce",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1400000, 0],
"desc": "new branch",
"bookmarks": ["foo"],
"parents": ["10e46f2dcbf4823578cf180f33ecf0b957964c47"],
"manifest": "cb5a1327723bada42f117e4c55a303246eaf9ccc",
"extra": {"branch": "default"},
"modified": [],
"added": [],
"removed": []
},
{
"rev": 3,
"node": "10e46f2dcbf4823578cf180f33ecf0b957964c47",
"branch": "default",
"phase": "draft",
"user": "person",
"date": [1300000, 0],
"desc": "no user, no domain",
"bookmarks": [],
"parents": ["97054abb4ab824450e9164180baf491ae0078465"],
"manifest": "cb5a1327723bada42f117e4c55a303246eaf9ccc",
"extra": {"branch": "default"},
"modified": ["c"],
"added": [],
"removed": []
},
{
"rev": 2,
"node": "97054abb4ab824450e9164180baf491ae0078465",
"branch": "default",
"phase": "draft",
"user": "other@place",
"date": [1200000, 0],
"desc": "no person",
"bookmarks": [],
"parents": ["b608e9d1a3f0273ccf70fb85fd6866b3482bf965"],
"manifest": "6e0e82995c35d0d57a52aca8da4e56139e06b4b1",
"extra": {"branch": "default"},
"modified": [],
"added": ["c"],
"removed": []
},
{
"rev": 1,
"node": "b608e9d1a3f0273ccf70fb85fd6866b3482bf965",
"branch": "default",
"phase": "draft",
"user": "A. N. Other <other@place>",
"date": [1100000, 0],
"desc": "other 1\nother 2\n\nother 3",
"bookmarks": [],
"parents": ["1e4e1b8f71e05681d422154f5421e385fec3454f"],
"manifest": "4e8d705b1e53e3f9375e0e60dc7b525d8211fe55",
"extra": {"branch": "default"},
"modified": [],
"added": ["b"],
"removed": []
},
{
"rev": 0,
"node": "1e4e1b8f71e05681d422154f5421e385fec3454f",
"branch": "default",
"phase": "draft",
"user": "User Name <user@hostname>",
"date": [1000000, 0],
"desc": "line 1\nline 2",
"bookmarks": [],
"parents": ["0000000000000000000000000000000000000000"],
"manifest": "a0c8bcbbb45c63b90b70ad007bf38961f64f2af0",
"extra": {"branch": "default"},
"modified": [],
"added": ["a"],
"removed": []
}
]"""
# Error if style not readable:
if feature.check(["unix-permissions", "no-root"]):
sh % "touch q"
os.chmod("q", 0)
sh % "hg log --style ./q" == r"""
abort: Permission denied: ./q
(current process runs with uid 42)
(./q: mode 0o52, uid 42, gid 42)
(.: mode 0o52, uid 42, gid 42)
[255]"""
# Error if no style:
sh % "hg log --style notexist" == r"""
abort: style 'notexist' not found
(available styles: bisect, changelog, compact, default, phases, show, status, xml)
[255]"""
sh % "hg log -T list" == r"""
available styles: bisect, changelog, compact, default, phases, show, status, xml
abort: specify a template
[255]"""
# Error if style missing key:
sh % "echo 'q = q'" > "t"
sh % "hg log --style ./t" == r"""
abort: "changeset" not in template map
[255]"""
# Error if style missing value:
sh % "echo 'changeset ='" > "t"
sh % "hg log --style t" == r"""
hg: parse error at t:1: missing value
[255]"""
# Error if include fails:
sh % "echo 'changeset = q'" >> "t"
if feature.check(["unix-permissions", "no-root"]):
sh % "hg log --style ./t" == r"""
abort: template file ./q: Permission denied
[255]"""
sh % "rm -f q"
# Include works:
sh % "echo '{rev}'" > "q"
sh % "hg log --style ./t" == r"""
8
7
6
5
4
3
2
1
0"""
# Check that recursive reference does not fall into RuntimeError (issue4758):
# common mistake:
sh % "cat" << r"""
changeset = '{changeset}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'changeset' in template
[255]"""
# circular reference:
sh % "cat" << r"""
changeset = '{foo}'
foo = '{changeset}'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'foo' in template
[255]"""
# buildmap() -> gettemplate(), where no thunk was made:
sh % "cat" << r"""
changeset = '{files % changeset}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758" == r"""
abort: recursive reference 'changeset' in template
[255]"""
# not a recursion if a keyword of the same name exists:
sh % "cat" << r"""
changeset = '{bookmarks % rev}'
rev = '{rev} {bookmark}\n'
""" > "issue4758"
sh % "hg log --style ./issue4758 -r tip" == ""
# Check that {phase} works correctly on parents:
sh % "cat" << r"""
changeset_debug = '{rev} ({phase}):{parents}\n'
parent = ' {rev} ({phase})'
""" > "parentphase"
sh % "hg debugmakepublic 5"
sh % "hg log --debug -G --style ./parentphase" == r"""
@ 8 (draft): 7 (draft)
│
o 7 (draft): -1 (public)
o 6 (draft): 5 (public) 4 (draft)
├─╮
│ o 5 (public): 3 (public)
│ │
o │ 4 (draft): 3 (public)
├─╯
o 3 (public): 2 (public)
│
o 2 (public): 1 (public)
│
o 1 (public): 0 (public)
│
o 0 (public): -1 (public)"""
# Missing non-standard names give no error (backward compatibility):
sh % "echo 'changeset = '\\''{c}'\\'''" > "t"
sh % "hg log --style ./t"
# Defining non-standard name works:
sh % "cat" << r"""
changeset = '{c}'
c = q
""" > "t"
sh % "hg log --style ./t" == r"""
8
7
6
5
4
3
2
1
0"""
# ui.style works:
sh % "echo '[ui]'" > ".hg/hgrc"
sh % "echo 'style = t'" >> ".hg/hgrc"
sh % "hg log" == r"""
8
7
6
5
4
3
2
1
0"""
# Issue338:
sh % "hg log '--style=changelog'" > "changelog"
sh % "cat changelog" == r"""
2020-01-01 test <test>
* fourth, second, third:
third
[209edb6a1848]
1970-01-12 User Name <user@hostname>
* second:
second
[88058a185da2]
1970-01-18 person <person>
* merge
[f7e5795620e7]
* d:
new head
[13207e5a10d9]
1970-01-17 person <person>
* new branch
[07fa1db10648]
1970-01-16 person <person>
* c:
no user, no domain
[10e46f2dcbf4]
1970-01-14 other <other@place>
* c:
no person
[97054abb4ab8]
1970-01-13 A. N. Other <other@place>
* b:
other 1 other 2
other 3
[b608e9d1a3f0]
1970-01-12 User Name <user@hostname>
* a:
line 1 line 2
[1e4e1b8f71e0]"""
# Issue2130: xml output for 'hg heads' is malformed
sh % "hg heads --style changelog" == r"""
2020-01-01 test <test>
* fourth, second, third:
third
[209edb6a1848]
1970-01-18 person <person>
* merge
[f7e5795620e7]"""
# Keys work:
out = []
for (
key
) in "author branch branches date desc file_adds file_dels file_mods file_copies file_copies_switch files manifest node parents rev diffstat extras p1rev p2rev p1node p2node".split():
for mode in ["", "--verbose", "--debug"]:
args = ["log", mode, "-T", "%s%s: {%s}\\n" % (key, mode, key)]
out += [l.strip() for l in sh.hg(*args).splitlines()]
eq(
"\n".join(out),
r"""
author: test
author: User Name <user@hostname>
author: person
author: person
author: person
author: person
author: other@place
author: A. N. Other <other@place>
author: User Name <user@hostname>
author--verbose: test
author--verbose: User Name <user@hostname>
author--verbose: person
author--verbose: person
author--verbose: person
author--verbose: person
author--verbose: other@place
author--verbose: A. N. Other <other@place>
author--verbose: User Name <user@hostname>
author--debug: test
author--debug: User Name <user@hostname>
author--debug: person
author--debug: person
author--debug: person
author--debug: person
author--debug: other@place
author--debug: A. N. Other <other@place>
author--debug: User Name <user@hostname>
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--verbose: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branch--debug: default
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--verbose:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
branches--debug:
date: 1577872860.00
date: 1000000.00
date: 1500001.00
date: 1500000.00
date: 1400000.00
date: 1300000.00
date: 1200000.00
date: 1100000.00
date: 1000000.00
date--verbose: 1577872860.00
date--verbose: 1000000.00
date--verbose: 1500001.00
date--verbose: 1500000.00
date--verbose: 1400000.00
date--verbose: 1300000.00
date--verbose: 1200000.00
date--verbose: 1100000.00
date--verbose: 1000000.00
date--debug: 1577872860.00
date--debug: 1000000.00
date--debug: 1500001.00
date--debug: 1500000.00
date--debug: 1400000.00
date--debug: 1300000.00
date--debug: 1200000.00
date--debug: 1100000.00
date--debug: 1000000.00
desc: third
desc: second
desc: merge
desc: new head
desc: new branch
desc: no user, no domain
desc: no person
desc: other 1
other 2
other 3
desc: line 1
line 2
desc--verbose: third
desc--verbose: second
desc--verbose: merge
desc--verbose: new head
desc--verbose: new branch
desc--verbose: no user, no domain
desc--verbose: no person
desc--verbose: other 1
other 2
other 3
desc--verbose: line 1
line 2
desc--debug: third
desc--debug: second
desc--debug: merge
desc--debug: new head
desc--debug: new branch
desc--debug: no user, no domain
desc--debug: no person
desc--debug: other 1
other 2
other 3
desc--debug: line 1
line 2
file_adds: fourth third
file_adds: second
file_adds:
file_adds: d
file_adds:
file_adds:
file_adds: c
file_adds: b
file_adds: a
file_adds--verbose: fourth third
file_adds--verbose: second
file_adds--verbose:
file_adds--verbose: d
file_adds--verbose:
file_adds--verbose:
file_adds--verbose: c
file_adds--verbose: b
file_adds--verbose: a
file_adds--debug: fourth third
file_adds--debug: second
file_adds--debug:
file_adds--debug: d
file_adds--debug:
file_adds--debug:
file_adds--debug: c
file_adds--debug: b
file_adds--debug: a
file_dels: second
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels:
file_dels--verbose: second
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--verbose:
file_dels--debug: second
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_dels--debug:
file_mods:
file_mods:
file_mods:
file_mods:
file_mods:
file_mods: c
file_mods:
file_mods:
file_mods:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--verbose: c
file_mods--verbose:
file_mods--verbose:
file_mods--verbose:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_mods--debug: c
file_mods--debug:
file_mods--debug:
file_mods--debug:
file_copies: fourth (second)
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies:
file_copies--verbose: fourth (second)
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--verbose:
file_copies--debug: fourth (second)
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies--debug:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--verbose:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
file_copies_switch--debug:
files: fourth second third
files: second
files:
files: d
files:
files: c
files: c
files: b
files: a
files--verbose: fourth second third
files--verbose: second
files--verbose:
files--verbose: d
files--verbose:
files--verbose: c
files--verbose: c
files--verbose: b
files--verbose: a
files--debug: fourth second third
files--debug: second
files--debug:
files--debug: d
files--debug:
files--debug: c
files--debug: c
files--debug: b
files--debug: a
manifest: 102f85d65468
manifest: e3aa144e25d9
manifest: 4dc3def4f9b4
manifest: 4dc3def4f9b4
manifest: cb5a1327723b
manifest: cb5a1327723b
manifest: 6e0e82995c35
manifest: 4e8d705b1e53
manifest: a0c8bcbbb45c
manifest--verbose: 102f85d65468
manifest--verbose: e3aa144e25d9
manifest--verbose: 4dc3def4f9b4
manifest--verbose: 4dc3def4f9b4
manifest--verbose: cb5a1327723b
manifest--verbose: cb5a1327723b
manifest--verbose: 6e0e82995c35
manifest--verbose: 4e8d705b1e53
manifest--verbose: a0c8bcbbb45c
manifest--debug: 102f85d6546830d0894e5420cdddaa12fe270c02
manifest--debug: e3aa144e25d914ea34006bd7b3c266b7eb283c61
manifest--debug: 4dc3def4f9b4c6e8de820f6ee74737f91e96a216
manifest--debug: 4dc3def4f9b4c6e8de820f6ee74737f91e96a216
manifest--debug: cb5a1327723bada42f117e4c55a303246eaf9ccc
manifest--debug: cb5a1327723bada42f117e4c55a303246eaf9ccc
manifest--debug: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1
manifest--debug: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55
manifest--debug: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0
node: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node: f7e5795620e78993ad76680c4306bb2da83907b3
node: 13207e5a10d9fd28ec424934298e176197f2c67f
node: 07fa1db1064879a32157227401eb44b322ae53ce
node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node: 97054abb4ab824450e9164180baf491ae0078465
node: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node: 1e4e1b8f71e05681d422154f5421e385fec3454f
node--verbose: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node--verbose: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node--verbose: f7e5795620e78993ad76680c4306bb2da83907b3
node--verbose: 13207e5a10d9fd28ec424934298e176197f2c67f
node--verbose: 07fa1db1064879a32157227401eb44b322ae53ce
node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node--verbose: 97054abb4ab824450e9164180baf491ae0078465
node--verbose: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node--verbose: 1e4e1b8f71e05681d422154f5421e385fec3454f
node--debug: 209edb6a18483c1434e4006bca4c2b1ee5e7090a
node--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
node--debug: f7e5795620e78993ad76680c4306bb2da83907b3
node--debug: 13207e5a10d9fd28ec424934298e176197f2c67f
node--debug: 07fa1db1064879a32157227401eb44b322ae53ce
node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
node--debug: 97054abb4ab824450e9164180baf491ae0078465
node--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
node--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
parents: 88058a185da2
parents: 000000000000
parents: 13207e5a10d9 07fa1db10648
parents: 10e46f2dcbf4
parents: 10e46f2dcbf4
parents: 97054abb4ab8
parents: b608e9d1a3f0
parents: 1e4e1b8f71e0
parents: 000000000000
parents--verbose: 88058a185da2
parents--verbose: 000000000000
parents--verbose: 13207e5a10d9 07fa1db10648
parents--verbose: 10e46f2dcbf4
parents--verbose: 10e46f2dcbf4
parents--verbose: 97054abb4ab8
parents--verbose: b608e9d1a3f0
parents--verbose: 1e4e1b8f71e0
parents--verbose: 000000000000
parents--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
parents--debug: 0000000000000000000000000000000000000000
parents--debug: 13207e5a10d9fd28ec424934298e176197f2c67f 07fa1db1064879a32157227401eb44b322ae53ce
parents--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
parents--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
parents--debug: 97054abb4ab824450e9164180baf491ae0078465
parents--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
parents--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
parents--debug: 0000000000000000000000000000000000000000
rev: 8
rev: 7
rev: 6
rev: 5
rev: 4
rev: 3
rev: 2
rev: 1
rev: 0
rev--verbose: 8
rev--verbose: 7
rev--verbose: 6
rev--verbose: 5
rev--verbose: 4
rev--verbose: 3
rev--verbose: 2
rev--verbose: 1
rev--verbose: 0
rev--debug: 8
rev--debug: 7
rev--debug: 6
rev--debug: 5
rev--debug: 4
rev--debug: 3
rev--debug: 2
rev--debug: 1
rev--debug: 0
diffstat: 3: +2/-1
diffstat: 1: +1/-0
diffstat: 0: +0/-0
diffstat: 1: +1/-0
diffstat: 0: +0/-0
diffstat: 1: +1/-0
diffstat: 1: +4/-0
diffstat: 1: +2/-0
diffstat: 1: +1/-0
diffstat--verbose: 3: +2/-1
diffstat--verbose: 1: +1/-0
diffstat--verbose: 0: +0/-0
diffstat--verbose: 1: +1/-0
diffstat--verbose: 0: +0/-0
diffstat--verbose: 1: +1/-0
diffstat--verbose: 1: +4/-0
diffstat--verbose: 1: +2/-0
diffstat--verbose: 1: +1/-0
diffstat--debug: 3: +2/-1
diffstat--debug: 1: +1/-0
diffstat--debug: 0: +0/-0
diffstat--debug: 1: +1/-0
diffstat--debug: 0: +0/-0
diffstat--debug: 1: +1/-0
diffstat--debug: 1: +4/-0
diffstat--debug: 1: +2/-0
diffstat--debug: 1: +1/-0
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--verbose: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
extras--debug: branch=default
p1rev: 7
p1rev: -1
p1rev: 5
p1rev: 3
p1rev: 3
p1rev: 2
p1rev: 1
p1rev: 0
p1rev: -1
p1rev--verbose: 7
p1rev--verbose: -1
p1rev--verbose: 5
p1rev--verbose: 3
p1rev--verbose: 3
p1rev--verbose: 2
p1rev--verbose: 1
p1rev--verbose: 0
p1rev--verbose: -1
p1rev--debug: 7
p1rev--debug: -1
p1rev--debug: 5
p1rev--debug: 3
p1rev--debug: 3
p1rev--debug: 2
p1rev--debug: 1
p1rev--debug: 0
p1rev--debug: -1
p2rev: -1
p2rev: -1
p2rev: 4
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: 4
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--verbose: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: 4
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p2rev--debug: -1
p1node: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node: 0000000000000000000000000000000000000000
p1node: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node: 97054abb4ab824450e9164180baf491ae0078465
p1node: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node: 0000000000000000000000000000000000000000
p1node--verbose: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node--verbose: 0000000000000000000000000000000000000000
p1node--verbose: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--verbose: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--verbose: 97054abb4ab824450e9164180baf491ae0078465
p1node--verbose: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node--verbose: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node--verbose: 0000000000000000000000000000000000000000
p1node--debug: 88058a185da202d22e8ee0bb4d3515ff0ecb222b
p1node--debug: 0000000000000000000000000000000000000000
p1node--debug: 13207e5a10d9fd28ec424934298e176197f2c67f
p1node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--debug: 10e46f2dcbf4823578cf180f33ecf0b957964c47
p1node--debug: 97054abb4ab824450e9164180baf491ae0078465
p1node--debug: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
p1node--debug: 1e4e1b8f71e05681d422154f5421e385fec3454f
p1node--debug: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 07fa1db1064879a32157227401eb44b322ae53ce
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 07fa1db1064879a32157227401eb44b322ae53ce
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--verbose: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 07fa1db1064879a32157227401eb44b322ae53ce
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000
p2node--debug: 0000000000000000000000000000000000000000""",
)
# Filters work:
sh % "hg log --template '{author|domain}\\n'" == r"""
hostname
place
place
hostname"""
sh % "hg log --template '{author|person}\\n'" == r"""
test
User Name
person
person
person
person
other
A. N. Other
User Name"""
sh % "hg log --template '{author|user}\\n'" == r"""
test
user
person
person
person
person
other
other
user"""
sh % "hg log --template '{date|date}\\n'" == r"""
Wed Jan 01 10:01:00 2020 +0000
Mon Jan 12 13:46:40 1970 +0000
Sun Jan 18 08:40:01 1970 +0000
Sun Jan 18 08:40:00 1970 +0000
Sat Jan 17 04:53:20 1970 +0000
Fri Jan 16 01:06:40 1970 +0000
Wed Jan 14 21:20:00 1970 +0000
Tue Jan 13 17:33:20 1970 +0000
Mon Jan 12 13:46:40 1970 +0000"""
sh % "hg log --template '{date|isodate}\\n'" == r"""
2020-01-01 10:01 +0000
1970-01-12 13:46 +0000
1970-01-18 08:40 +0000
1970-01-18 08:40 +0000
1970-01-17 04:53 +0000
1970-01-16 01:06 +0000
1970-01-14 21:20 +0000
1970-01-13 17:33 +0000
1970-01-12 13:46 +0000"""
sh % "hg log --template '{date|isodatesec}\\n'" == r"""
2020-01-01 10:01:00 +0000
1970-01-12 13:46:40 +0000
1970-01-18 08:40:01 +0000
1970-01-18 08:40:00 +0000
1970-01-17 04:53:20 +0000
1970-01-16 01:06:40 +0000
1970-01-14 21:20:00 +0000
1970-01-13 17:33:20 +0000
1970-01-12 13:46:40 +0000"""
sh % "hg log --template '{date|rfc822date}\\n'" == r"""
Wed, 01 Jan 2020 10:01:00 +0000
Mon, 12 Jan 1970 13:46:40 +0000
Sun, 18 Jan 1970 08:40:01 +0000
Sun, 18 Jan 1970 08:40:00 +0000
Sat, 17 Jan 1970 04:53:20 +0000
Fri, 16 Jan 1970 01:06:40 +0000
Wed, 14 Jan 1970 21:20:00 +0000
Tue, 13 Jan 1970 17:33:20 +0000
Mon, 12 Jan 1970 13:46:40 +0000"""
sh % "hg log --template '{desc|firstline}\\n'" == r"""
third
second
merge
new head
new branch
no user, no domain
no person
other 1
line 1"""
sh % "hg log --template '{node|short}\\n'" == r"""
209edb6a1848
88058a185da2
f7e5795620e7
13207e5a10d9
07fa1db10648
10e46f2dcbf4
97054abb4ab8
b608e9d1a3f0
1e4e1b8f71e0"""
sh % "hg log --template '<changeset author=\"{author|xmlescape}\"/>\n'" == r"""
<changeset author="test"/>
<changeset author="User Name <user@hostname>"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="person"/>
<changeset author="other@place"/>
<changeset author="A. N. Other <other@place>"/>
<changeset author="User Name <user@hostname>"/>"""
sh % "hg log --template '{rev}: {children}\\n'" == r"""
8: (trailing space)
7: 209edb6a1848
6: (trailing space)
5: f7e5795620e7
4: f7e5795620e7
3: 07fa1db10648 13207e5a10d9
2: 10e46f2dcbf4
1: 97054abb4ab8
0: b608e9d1a3f0"""
# Formatnode filter works:
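# (it shows the short hash normally and the full 40-character hash with --debug)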
sh % "hg -q log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg -v log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e0"
sh % "hg --debug log -r 0 --template '{node|formatnode}\\n'" == "1e4e1b8f71e05681d422154f5421e385fec3454f"
# Age filter:
sh % "hg init unstable-hash"
sh % "cd unstable-hash"
sh % "hg log --template '{date|age}\\n' '||' exit 1" > "/dev/null"
n = datetime.datetime.now() + datetime.timedelta(366 * 7)
s = "%d-%d-%d 00:00" % (n.year, n.month, n.day)
open("a", "wb").write(pycompat.encodeutf8(s))
sh % "hg add a"
sh % ("hg commit -m future -d '%s UTC'" % s)
sh % "hg log -l1 --template '{date|age}\\n'" == "7 years from now"
sh % "cd .."
# Add a dummy commit to make up for the instability of the above:
sh % "echo a" > "a"
sh % "hg add a"
sh % "hg ci -m future"
# Count filter:
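# (count gives the length of a string or the number of items in a list/revset)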
sh % "hg log -l1 --template '{node|count} {node|short|count}\\n'" == "40 12"
sh % 'hg log -l1 --template \'{revset("null^")|count} {revset(".")|count} {revset("0::3")|count}\\n\'' == "0 1 4"
sh % "hg log -G --template '{rev}: children: {children|count}, file_adds: {file_adds|count}, ancestors: {revset(\"ancestors(%s)\", rev)|count}'" == r"""
@ 9: children: 0, file_adds: 1, ancestors: 3
│
o 8: children: 1, file_adds: 2, ancestors: 2
│
o 7: children: 1, file_adds: 1, ancestors: 1
o 6: children: 0, file_adds: 0, ancestors: 7
├─╮
│ o 5: children: 1, file_adds: 1, ancestors: 5
│ │
o │ 4: children: 1, file_adds: 0, ancestors: 5
├─╯
o 3: children: 2, file_adds: 0, ancestors: 4
│
o 2: children: 1, file_adds: 1, ancestors: 3
│
o 1: children: 1, file_adds: 1, ancestors: 2
│
o 0: children: 1, file_adds: 1, ancestors: 1"""
# Upper/lower filters:
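# (these only apply to string-like keywords; applying them to date aborts below)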
sh % "hg log -r0 --template '{author|upper}\\n'" == "USER NAME <USER@HOSTNAME>"
sh % "hg log -r0 --template '{author|lower}\\n'" == "user name <user@hostname>"
sh % "hg log -r0 --template '{date|upper}\\n'" == r"""
abort: template filter 'upper' is not compatible with keyword 'date'
[255]"""
# Add a commit that does all possible modifications at once
sh % "echo modify" >> "third"
sh % "touch b"
sh % "hg add b"
sh % "hg mv fourth fifth"
sh % "hg rm a"
sh % "hg ci -m 'Modify, add, remove, rename'"
# Check the status template
(
sh % "cat"
<< r"""
[extensions]
color=
"""
>> "$HGRCPATH"
)
sh % "hg log -T status -r 10" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: Modify, add, remove, rename
files:
M third
A b
A fifth
R a
R fourth"""
sh % "hg log -T status -C -r 10" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 -v" == r"""
commit: bc9dfec3b3bc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
description:
Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 --debug" == r"""
commit: bc9dfec3b3bcc43c41a22000f3226b0c1085d5c1
phase: draft
manifest: 1685af69a14aa2346cfb01cf0e7f50ef176128b4
user: test
date: Thu Jan 01 00:00:00 1970 +0000
extra: branch=default
description:
Modify, add, remove, rename
files:
M third
A b
A fifth
fourth
R a
R fourth"""
sh % "hg log -T status -C -r 10 --quiet" == "bc9dfec3b3bc"
sh % "hg '--color=debug' log -T status -r 10" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[log.summary|summary: Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[log.summary|summary: Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 -v" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bc]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[ui.note log.description|description:]
[ui.note log.description|Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 --debug" == r"""
[log.changeset changeset.draft|commit: bc9dfec3b3bcc43c41a22000f3226b0c1085d5c1]
[log.phase|phase: draft]
[ui.debug log.manifest|manifest: 1685af69a14aa2346cfb01cf0e7f50ef176128b4]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|Modify, add, remove, rename]
[ui.note log.files|files:]
[status.modified|M third]
[status.added|A b]
[status.added|A fifth]
[status.copied| fourth]
[status.removed|R a]
[status.removed|R fourth]"""
sh % "hg '--color=debug' log -T status -C -r 10 --quiet" == "[log.node|bc9dfec3b3bc]"
# Check the bisect template
sh % "hg bisect -g 1"
sh % "hg bisect -b 3 --noupdate" == "Testing changeset 97054abb4ab8 (2 changesets remaining, ~1 tests)"
sh % "hg log -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e0
bisect: good (implicit)
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
summary: line 1
commit: b608e9d1a3f0
bisect: good
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
summary: other 1
commit: 97054abb4ab8
bisect: untested
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
summary: no person
commit: 10e46f2dcbf4
bisect: bad
user: person
date: Fri Jan 16 01:06:40 1970 +0000
summary: no user, no domain
commit: 07fa1db10648
bisect: bad (implicit)
bookmark: foo
user: person
date: Sat Jan 17 04:53:20 1970 +0000
summary: new branch"""
sh % "hg log --debug -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e05681d422154f5421e385fec3454f
bisect: good (implicit)
phase: public
manifest: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
files+: a
extra: branch=default
description:
line 1
line 2
commit: b608e9d1a3f0273ccf70fb85fd6866b3482bf965
bisect: good
phase: public
manifest: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
files+: b
extra: branch=default
description:
other 1
other 2
other 3
commit: 97054abb4ab824450e9164180baf491ae0078465
bisect: untested
phase: public
manifest: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
files+: c
extra: branch=default
description:
no person
commit: 10e46f2dcbf4823578cf180f33ecf0b957964c47
bisect: bad
phase: public
manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc
user: person
date: Fri Jan 16 01:06:40 1970 +0000
files: c
extra: branch=default
description:
no user, no domain
commit: 07fa1db1064879a32157227401eb44b322ae53ce
bisect: bad (implicit)
bookmark: foo
phase: draft
manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc
user: person
date: Sat Jan 17 04:53:20 1970 +0000
extra: branch=default
description:
new branch"""
sh % "hg log -v -T bisect -r '0:4'" == r"""
commit: 1e4e1b8f71e0
bisect: good (implicit)
user: User Name <user@hostname>
date: Mon Jan 12 13:46:40 1970 +0000
files: a
description:
line 1
line 2
commit: b608e9d1a3f0
bisect: good
user: A. N. Other <other@place>
date: Tue Jan 13 17:33:20 1970 +0000
files: b
description:
other 1
other 2
other 3
commit: 97054abb4ab8
bisect: untested
user: other@place
date: Wed Jan 14 21:20:00 1970 +0000
files: c
description:
no person
commit: 10e46f2dcbf4
bisect: bad
user: person
date: Fri Jan 16 01:06:40 1970 +0000
files: c
description:
no user, no domain
commit: 07fa1db10648
bisect: bad (implicit)
bookmark: foo
user: person
date: Sat Jan 17 04:53:20 1970 +0000
description:
new branch"""
sh % "hg '--color=debug' log -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e0]
[log.bisect bisect.good|bisect: good (implicit)]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[log.summary|summary: line 1]
[log.changeset changeset.public|commit: b608e9d1a3f0]
[log.bisect bisect.good|bisect: good]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[log.summary|summary: other 1]
[log.changeset changeset.public|commit: 97054abb4ab8]
[log.bisect bisect.untested|bisect: untested]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[log.summary|summary: no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4]
[log.bisect bisect.bad|bisect: bad]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[log.summary|summary: no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db10648]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[log.summary|summary: new branch]"""
sh % "hg '--color=debug' log --debug -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e05681d422154f5421e385fec3454f]
[log.bisect bisect.good|bisect: good (implicit)]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: a0c8bcbbb45c63b90b70ad007bf38961f64f2af0]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[ui.debug log.files|files+: a]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|line 1
line 2]
[log.changeset changeset.public|commit: b608e9d1a3f0273ccf70fb85fd6866b3482bf965]
[log.bisect bisect.good|bisect: good]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: 4e8d705b1e53e3f9375e0e60dc7b525d8211fe55]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[ui.debug log.files|files+: b]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|other 1
other 2
other 3]
[log.changeset changeset.public|commit: 97054abb4ab824450e9164180baf491ae0078465]
[log.bisect bisect.untested|bisect: untested]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: 6e0e82995c35d0d57a52aca8da4e56139e06b4b1]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[ui.debug log.files|files+: c]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4823578cf180f33ecf0b957964c47]
[log.bisect bisect.bad|bisect: bad]
[log.phase|phase: public]
[ui.debug log.manifest|manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[ui.debug log.files|files: c]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db1064879a32157227401eb44b322ae53ce]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.phase|phase: draft]
[ui.debug log.manifest|manifest: cb5a1327723bada42f117e4c55a303246eaf9ccc]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[ui.debug log.extra|extra: branch=default]
[ui.note log.description|description:]
[ui.note log.description|new branch]"""
sh % "hg '--color=debug' log -v -T bisect -r '0:4'" == r"""
[log.changeset changeset.public|commit: 1e4e1b8f71e0]
[log.bisect bisect.good|bisect: good (implicit)]
[log.user|user: User Name <user@hostname>]
[log.date|date: Mon Jan 12 13:46:40 1970 +0000]
[ui.note log.files|files: a]
[ui.note log.description|description:]
[ui.note log.description|line 1
line 2]
[log.changeset changeset.public|commit: b608e9d1a3f0]
[log.bisect bisect.good|bisect: good]
[log.user|user: A. N. Other <other@place>]
[log.date|date: Tue Jan 13 17:33:20 1970 +0000]
[ui.note log.files|files: b]
[ui.note log.description|description:]
[ui.note log.description|other 1
other 2
other 3]
[log.changeset changeset.public|commit: 97054abb4ab8]
[log.bisect bisect.untested|bisect: untested]
[log.user|user: other@place]
[log.date|date: Wed Jan 14 21:20:00 1970 +0000]
[ui.note log.files|files: c]
[ui.note log.description|description:]
[ui.note log.description|no person]
[log.changeset changeset.public|commit: 10e46f2dcbf4]
[log.bisect bisect.bad|bisect: bad]
[log.user|user: person]
[log.date|date: Fri Jan 16 01:06:40 1970 +0000]
[ui.note log.files|files: c]
[ui.note log.description|description:]
[ui.note log.description|no user, no domain]
[log.changeset changeset.draft|commit: 07fa1db10648]
[log.bisect bisect.bad|bisect: bad (implicit)]
[log.bookmark|bookmark: foo]
[log.user|user: person]
[log.date|date: Sat Jan 17 04:53:20 1970 +0000]
[ui.note log.description|description:]
[ui.note log.description|new branch]"""
sh % "hg bisect --reset"
# Error on syntax:
sh % "echo 'x = \"f'" >> "t"
sh % "hg log" == r"""
hg: parse error at t:3: unmatched quotes
[255]"""
sh % "hg log -T '{date'" == r"""
hg: parse error at 1: unterminated template expansion
({date
^ here)
[255]"""
# Behind the scenes, this will throw TypeError
sh % "hg log -l 3 --template '{date|obfuscate}\\n'" == r"""
abort: template filter 'obfuscate' is not compatible with keyword 'date'
[255]"""
# Behind the scenes, this will throw a ValueError
sh % "hg log -l 3 --template 'line: {desc|shortdate}\\n'" == r"""
abort: template filter 'shortdate' is not compatible with keyword 'desc'
[255]"""
# Behind the scenes, this will throw AttributeError
sh % "hg log -l 3 --template 'line: {date|escape}\\n'" == r"""
abort: template filter 'escape' is not compatible with keyword 'date'
[255]"""
sh % "hg log -l 3 --template 'line: {extras|localdate}\\n'" == r"""
hg: parse error: localdate expects a date information
[255]"""
# Behind the scenes, this will throw ValueError
sh % "hg tip --template '{author|email|date}\\n'" == r"""
hg: parse error: date expects a date information
[255]"""
sh % "hg tip -T '{author|email|shortdate}\\n'" == r"""
abort: template filter 'shortdate' is not compatible with keyword 'author'
[255]"""
sh % "hg tip -T '{get(extras, \"branch\")|shortdate}\\n'" == r"""
abort: incompatible use of template filter 'shortdate'
[255]"""
# Error in nested template:
sh % "hg log -T '{\"date'" == r"""
hg: parse error at 2: unterminated string
({"date
^ here)
[255]"""
sh % "hg log -T '{\"foo{date|?}\"}'" == r"""
hg: parse error at 11: syntax error
({"foo{date|?}"}
^ here)
[255]"""
# An error is thrown if a template function doesn't exist
sh % "hg tip --template '{foo()}\\n'" == r"""
hg: parse error: unknown function 'foo'
[255]"""
# Pass generator object created by template function to filter
sh % "hg log -l 1 --template '{if(author, author)|user}\\n'" == "test"
# Test index keyword:
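# ({index} is 0-based, both for the log entries at the top level and within the
# files % expansion, as the output below shows)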
sh % "hg log -l 2 -T '{index + 10}{files % \" {index}:{file}\"}\\n'" == r"""
10 0:a 1:b 2:fifth 3:fourth 4:third
11 0:a"""
# Test diff function:
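# (the 'fourth'/'second' files contain non-UTF-8 bytes, which appear as surrogate
# escapes on Python 3; hence the is_py3 branches below)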
sh % "hg diff -c 8" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff()}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff('\\''glob:f*'\\'')}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
)
sh % "hg log -r 8 -T '{diff('\\'''\\'', '\\''glob:f*'\\'')}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 second
--- a/second Mon Jan 12 13:46:40 1970 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
"""
+ (" -🥈\udce2(\udca1" if is_py3 else " -🥈\xe2\x28\xa1")
+ """
diff -r 88058a185da2 -r 209edb6a1848 third
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/third Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
+third"""
)
sh % "hg log -r 8 -T '{diff('\\''FOURTH'\\''|lower)}'" == (
r"""
diff -r 88058a185da2 -r 209edb6a1848 fourth
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/fourth Wed Jan 01 10:01:00 2020 +0000
@@ -0,0 +1,1 @@
"""
+ (" +🥈\udce2(\udca1" if is_py3 else " +🥈\xe2\x28\xa1")
)
sh % "hg log -r 8 -T '{diff()|json}'" == '"diff -r 88058a185da2 -r 209edb6a1848 fourth\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/fourth\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+\\ud83e\\udd48\\udce2(\\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 second\\n--- a/second\\tMon Jan 12 13:46:40 1970 +0000\\n+++ /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n@@ -1,1 +0,0 @@\\n-\\ud83e\\udd48\\udce2(\\udca1\\ndiff -r 88058a185da2 -r 209edb6a1848 third\\n--- /dev/null\\tThu Jan 01 00:00:00 1970 +0000\\n+++ b/third\\tWed Jan 01 10:01:00 2020 +0000\\n@@ -0,0 +1,1 @@\\n+third\\n"'
# ui verbosity:
sh % "hg log -l1 -T '{verbosity}\\n'"
sh % "hg log -l1 -T '{verbosity}\\n' --debug" == "debug"
sh % "hg log -l1 -T '{verbosity}\\n' --quiet" == "quiet"
sh % "hg log -l1 -T '{verbosity}\\n' --verbose" == "verbose"
sh % "cd .."
# latesttag:
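# Build a small repo with two heads and a merge for the tests below.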
sh % "hg init latesttag"
sh % "cd latesttag"
sh % "echo a" > "file"
sh % "hg ci -Am a -d '0 0'" == "adding file"
sh % "echo b" >> "file"
sh % "hg ci -m b -d '1 0'"
sh % "echo c" >> "head1"
sh % "hg ci -Am h1c -d '2 0'" == "adding head1"
sh % "hg update -q 1"
sh % "echo d" >> "head2"
sh % "hg ci -Am h2d -d '3 0'" == "adding head2"
sh % "echo e" >> "head2"
sh % "hg ci -m h2e -d '4 0'"
sh % "hg merge -q"
sh % "hg ci -m merge -d '5 -3600'"
sh % "cd .."
# Style path expansion: issue1948 - ui.style option doesn't work on OSX
# if it is a relative path
sh % "mkdir -p $TESTTMP/home/styles"
sh % "cat" << r"""
changeset = 'test {rev}:{node|short}\n'
""" > "$TESTTMP/home/styles/teststyle"
sh % "cat" << r"""
[ui]
style = $TESTTMP/home/styles/teststyle
""" > "latesttag/.hg/hgrc"
sh % "hg -R latesttag tip" == "test 5:888bdaa97ddd"
# Test recursive showlist template (issue1989):
sh % "cat" << r"""
changeset = '{file_mods}{manifest}{extras}'
file_mod = 'M|{author|person}\n'
manifest = '{rev},{author}\n'
extra = '{key}: {author}\n'
""" > "style1989"
sh % "hg -R latesttag log -r tip^ '--style=style1989'" == r"""
M|test
4,test
branch: test"""
# Test new-style inline templating:
sh % "hg log -R latesttag -r tip^ --template 'modified files: {file_mods % \" {file}\\n\"}\\n'" == "modified files: head2"
sh % "hg log -R latesttag -r tip^ -T '{rev % \"a\"}\\n'" == r"""
hg: parse error: keyword 'rev' is not iterable
[255]"""
sh % 'hg log -R latesttag -r tip^ -T \'{get(extras, "unknown") % "a"}\\n\'' == r"""
hg: parse error: None is not iterable
[255]"""
# Test new-style inline templating of non-list/dict type:
sh % "hg log -R latesttag -r tip -T '{manifest}\\n'" == "ed2d5d416a51"
sh % "hg log -R latesttag -r tip -T 'string length: {manifest|count}\\n'" == "string length: 12"
sh % "hg log -R latesttag -r tip -T '{manifest % \"{rev}:{node}\"}\\n'" == "5:ed2d5d416a513f3f19ab4cd41c793dcd8272a497"
sh % 'hg log -R latesttag -r tip -T \'{get(extras, "branch") % "{key}: {value}\\n"}\'' == "branch: default"
sh % 'hg log -R latesttag -r tip -T \'{get(extras, "unknown") % "{key}\\n"}\'' == r"""
hg: parse error: None is not iterable
[255]"""
sh % "hg log -R latesttag -r tip -T '{min(extras) % \"{key}: {value}\\n\"}'" == "branch: default"
sh % 'hg log -R latesttag -l1 -T \'{min(revset("0:5")) % "{rev}:{node|short}\\n"}\'' == "0:ce3cec86e6c2"
sh % 'hg log -R latesttag -l1 -T \'{max(revset("0:5")) % "{rev}:{node|short}\\n"}\'' == "5:888bdaa97ddd"
# Test manifest/get() can be join()-ed as before, though it's silly:
sh % "hg log -R latesttag -r tip -T '{join(manifest, \"\")}\\n'" == "ed2d5d416a51"
sh % 'hg log -R latesttag -r tip -T \'{join(get(extras, "branch"), "")}\\n\'' == "default"
# Test min/max of integers
sh % "hg log -R latesttag -l1 -T '{min(revset(\"4:5\"))}\\n'" == "4"
sh % "hg log -R latesttag -l1 -T '{max(revset(\"4:5\"))}\\n'" == "5"
# Test dot operator precedence:
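# (the parse trees below show that '.' binds more tightly than '|')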
sh % "hg debugtemplate -R latesttag -r0 -v '{manifest.node|short}\\n'" == r"""
(template
(|
(.
(symbol 'manifest')
(symbol 'node'))
(symbol 'short'))
(string '\n'))
89f4071fec70"""
# (the following examples are invalid, but seem natural from a parsing point of view)
sh % "hg debugtemplate -R latesttag -r0 -v '{foo|bar.baz}\\n'" == r"""
(template
(|
(symbol 'foo')
(.
(symbol 'bar')
(symbol 'baz')))
(string '\n'))
hg: parse error: expected a symbol, got '.'
[255]"""
sh % "hg debugtemplate -R latesttag -r0 -v '{foo.bar()}\\n'" == r"""
(template
(.
(symbol 'foo')
(func
(symbol 'bar')
None))
(string '\n'))
hg: parse error: expected a symbol, got 'func'
[255]"""
# Test evaluation of dot operator:
sh % "hg log -R latesttag -l1 -T '{min(revset(\"0:9\")).node}\\n'" == "ce3cec86e6c26bd9bdfc590a6b92abc9680f1796"
sh % "hg log -R latesttag -r0 -T '{extras.branch}\\n'" == "default"
sh % "hg log -R latesttag -l1 -T '{author.invalid}\\n'" == r"""
hg: parse error: keyword 'author' has no member
[255]"""
sh % "hg log -R latesttag -l1 -T '{min(\"abc\").invalid}\\n'" == r"""
hg: parse error: 'a' has no member
[255]"""
# Test the sub() template function with expansion:
sh % 'hg log -R latesttag -r 5 --template \'{sub("[0-9]", "x", "{rev}")}\\n\'' == "x"
sh % 'hg log -R latesttag -r 5 -T \'{sub("[", "x", rev)}\\n\'' == r"""
hg: parse error: sub got an invalid pattern: [
[255]"""
sh % 'hg log -R latesttag -r 5 -T \'{sub("[0-9]", r"\\1", rev)}\\n\'' == r"""
hg: parse error: sub got an invalid replacement: \1
[255]"""
# Test the strip function with chars specified:
sh % "hg log -R latesttag --template '{desc}\\n'" == r"""
merge
h2e
h2d
h1c
b
a"""
sh % "hg log -R latesttag --template '{strip(desc, \"te\")}\\n'" == r"""
merg
h2
h2d
h1c
b
a"""
# Test date format:
sh % "hg log -R latesttag --template 'date: {date(date, \"%y %m %d %S %z\")}\\n'" == r"""
date: 70 01 01 05 +0100
date: 70 01 01 04 +0000
date: 70 01 01 03 +0000
date: 70 01 01 02 +0000
date: 70 01 01 01 +0000
date: 70 01 01 00 +0000"""
# Test invalid date:
sh % "hg log -R latesttag -T '{date(rev)}\\n'" == r"""
hg: parse error: date expects a date information
[255]"""
# Test integer literal:
sh % "hg debugtemplate -v '{(0)}\\n'" == r"""
(template
(group
(integer '0'))
(string '\n'))
0"""
sh % "hg debugtemplate -v '{(123)}\\n'" == r"""
(template
(group
(integer '123'))
(string '\n'))
123"""
sh % "hg debugtemplate -v '{(-4)}\\n'" == r"""
(template
(group
(negate
(integer '4')))
(string '\n'))
-4"""
sh % "hg debugtemplate '{(-)}\\n'" == r"""
hg: parse error at 3: not a prefix: )
({(-)}\n
^ here)
[255]"""
sh % "hg debugtemplate '{(-a)}\\n'" == r"""
hg: parse error: negation needs an integer argument
[255]"""
# top-level integer literal is interpreted as symbol (i.e. variable name):
sh % "hg debugtemplate -D '1=one' -v '{1}\\n'" == r"""
(template
(integer '1')
(string '\n'))
one"""
sh % "hg debugtemplate -D '1=one' -v '{if(\"t\", \"{1}\")}\\n'" == r"""
(template
(func
(symbol 'if')
(list
(string 't')
(template
(integer '1'))))
(string '\n'))
one"""
sh % "hg debugtemplate -D '1=one' -v '{1|stringify}\\n'" == r"""
(template
(|
(integer '1')
(symbol 'stringify'))
(string '\n'))
one"""
# unless explicit symbol is expected:
sh % "hg log -Ra -r0 -T '{desc|1}\\n'" == r"""
hg: parse error: expected a symbol, got 'integer'
[255]"""
sh % "hg log -Ra -r0 -T '{1()}\\n'" == r"""
hg: parse error: expected a symbol, got 'integer'
[255]"""
# Test string literal:
sh % "hg debugtemplate -Ra -r0 -v '{\"string with no template fragment\"}\\n'" == r"""
(template
(string 'string with no template fragment')
(string '\n'))
string with no template fragment"""
sh % "hg debugtemplate -Ra -r0 -v '{\"template: {rev}\"}\\n'" == r"""
(template
(template
(string 'template: ')
(symbol 'rev'))
(string '\n'))
template: 0"""
sh % "hg debugtemplate -Ra -r0 -v '{r\"rawstring: {rev}\"}\\n'" == r"""
(template
(string 'rawstring: {rev}')
(string '\n'))
rawstring: {rev}"""
sh % "hg debugtemplate -Ra -r0 -v '{files % r\"rawstring: {file}\"}\\n'" == r"""
(template
(%
(symbol 'files')
(string 'rawstring: {file}'))
(string '\n'))
rawstring: {file}"""
# Test string escaping:
sh % "hg log -R latesttag -r 0 --template '>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "hg log -R latesttag -r 0 --config 'ui.logtemplate=>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "hg log -R latesttag -r 0 -T esc --config 'templates.esc=>\\n<>\\\\n<{if(rev, \"[>\\n<>\\\\n<]\")}>\\n<>\\\\n<\\n'" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
sh % "cat" << r"""
changeset = '>\n<>\\n<{if(rev, "[>\n<>\\n<]")}>\n<>\\n<\n'
""" > "esctmpl"
sh % "hg log -R latesttag -r 0 --style ./esctmpl" == r"""
>
<>\n<[>
<>\n<]>
<>\n<"""
# Test string escaping of quotes:
sh % 'hg log -Ra -r0 -T \'{"\\""}\\n\'' == '"'
sh % 'hg log -Ra -r0 -T \'{"\\\\\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\\\\\""}\\n\'' == '\\\\\\"'
sh % 'hg log -Ra -r0 -T \'{"\\""}\\n\'' == '"'
sh % 'hg log -Ra -r0 -T \'{"\\\\\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\""}\\n\'' == '\\"'
sh % 'hg log -Ra -r0 -T \'{r"\\\\\\""}\\n\'' == '\\\\\\"'
# Test an exception in quoted templates: a single backslash before a quotation mark
# is stripped before parsing:
sh % "cat" << r"""
changeset = "\" \\" \\\" \\\\" {files % \"{file}\"}\n"
""" > "escquotetmpl"
sh % "cd latesttag"
sh % "hg log -r 2 --style ../escquotetmpl" == '" \\" \\" \\\\" head1'
sh % 'hg log -r 2 -T esc --config \'templates.esc="{\\"valid\\"}\\n"\'' == "valid"
sh % "hg log -r 2 -T esc --config 'templates.esc='\\''{\\'\\''valid\\'\\''}\\n'\\'''" == "valid"
# Test compatibility with Mercurial 2.9.2-3.4 handling of escaped quoted strings in
# nested _evalifliteral() templates (issue4733):
sh % 'hg log -r 2 -T \'{if(rev, "\\"{rev}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"\\\\\\"{rev}\\")}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, \\\\\\"\\\\\\\\\\\\\\"{rev}\\\\\\")}\\")}")}\\n\'' == '"2'
sh % 'hg log -r 2 -T \'{if(rev, "\\\\\\"")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"\\\\\\\\\\\\\\"\\")}")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, \\\\\\"\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"\\\\\\")}\\")}")}\\n\'' == '\\"'
sh % 'hg log -r 2 -T \'{if(rev, r"\\\\\\"")}\\n\'' == '\\\\\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, r\\"\\\\\\\\\\\\\\"\\")}")}\\n\'' == '\\\\\\"'
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\"{if(rev, r\\\\\\"\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"\\\\\\")}\\")}")}\\n\'' == '\\\\\\"'
# escaped single quotes and errors:
sh % "hg log -r 2 -T '{if(rev, '\\''{if(rev, \\'\\''foo\\'\\'')}'\\'')}\\n'" == "foo"
sh % "hg log -r 2 -T '{if(rev, '\\''{if(rev, r\\'\\''foo\\'\\'')}'\\'')}\\n'" == "foo"
sh % 'hg log -r 2 -T \'{if(rev, "{if(rev, \\")}")}\\n\'' == r"""
hg: parse error at 21: unterminated string
({if(rev, "{if(rev, \")}")}\n
^ here)
[255]"""
sh % 'hg log -r 2 -T \'{if(rev, \\"\\\\"")}\\n\'' == r"""
hg: parse error: trailing \ in string
[255]"""
sh % 'hg log -r 2 -T \'{if(rev, r\\"\\\\"")}\\n\'' == r"""
hg: parse error: trailing \ in string
[255]"""
sh % "cd .."
# Test leading backslashes:
sh % "cd latesttag"
sh % "hg log -r 2 -T '\\{rev} {files % \"\\{file}\"}\\n'" == "{rev} {file}"
sh % "hg log -r 2 -T '\\\\{rev} {files % \"\\\\{file}\"}\\n'" == "\\2 \\head1"
sh % "hg log -r 2 -T '\\\\\\{rev} {files % \"\\\\\\{file}\"}\\n'" == "\\{rev} \\{file}"
sh % "cd .."
# Test leading backslashes in "if" expression (issue4714):
sh % "cd latesttag"
sh % 'hg log -r 2 -T \'{if("1", "\\{rev}")} {if("1", r"\\{rev}")}\\n\'' == "{rev} \\{rev}"
sh % 'hg log -r 2 -T \'{if("1", "\\\\{rev}")} {if("1", r"\\\\{rev}")}\\n\'' == "\\2 \\\\{rev}"
sh % 'hg log -r 2 -T \'{if("1", "\\\\\\{rev}")} {if("1", r"\\\\\\{rev}")}\\n\'' == "\\{rev} \\\\\\{rev}"
sh % "cd .."
# "string-escape"-ed "\x5c\x786e" becomes r"\x6e" (once) or r"n" (twice)
sh % 'hg log -R a -r 0 --template \'{if("1", "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 0 --template \'{if("1", r"\\x5c\\x786e", "NG")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 0 --template \'{if("", "NG", "\\x5c\\x786e")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 0 --template \'{if("", "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 2 --template \'{ifeq("no perso\\x6e", desc, "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 2 --template \'{ifeq(r"no perso\\x6e", desc, "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % 'hg log -R a -r 2 --template \'{ifeq(desc, "no perso\\x6e", "\\x5c\\x786e", "NG")}\\n\'' == "\\x6e"
sh % 'hg log -R a -r 2 --template \'{ifeq(desc, r"no perso\\x6e", "NG", r"\\x5c\\x786e")}\\n\'' == "\\x5c\\x786e"
sh % "hg log -R a -r 8 --template '{join(files, \"\\n\")}\\n'" == r"""
fourth
second
third"""
sh % "hg log -R a -r 8 --template '{join(files, r\"\\n\")}\\n'" == "fourth\\nsecond\\nthird"
sh % 'hg log -R a -r 2 --template \'{rstdoc("1st\\n\\n2nd", "htm\\x6c")}\'' == r"""
<p>
1st
</p>
<p>
2nd
</p>"""
sh % 'hg log -R a -r 2 --template \'{rstdoc(r"1st\\n\\n2nd", "html")}\'' == r"""
<p>
1st\n\n2nd
</p>"""
sh % 'hg log -R a -r 2 --template \'{rstdoc("1st\\n\\n2nd", r"htm\\x6c")}\'' == r"""
1st
2nd"""
sh % "hg log -R a -r 2 --template '{strip(desc, \"\\x6e\")}\\n'" == "o perso"
sh % "hg log -R a -r 2 --template '{strip(desc, r\"\\x6e\")}\\n'" == "no person"
sh % 'hg log -R a -r 2 --template \'{strip("no perso\\x6e", "\\x6e")}\\n\'' == "o perso"
sh % 'hg log -R a -r 2 --template \'{strip(r"no perso\\x6e", r"\\x6e")}\\n\'' == "no perso"
sh % 'hg log -R a -r 2 --template \'{sub("\\\\x6e", "\\x2d", desc)}\\n\'' == "-o perso-"
sh % 'hg log -R a -r 2 --template \'{sub(r"\\\\x6e", "-", desc)}\\n\'' == "no person"
sh % pycompat.decodeutf8(
b'hg log -R a -r 2 --template \'{sub("n", "\\x2d", "no perso\\x6e")}\\n\''
) == pycompat.decodeutf8(b"-o perso-")
sh % "hg log -R a -r 8 --template '{files % \"{file}\\n\"}'" == r"""
fourth
second
third"""
# Test string escaping in nested expression:
sh % 'hg log -R a -r 8 --template \'{ifeq(r"\\x6e", if("1", "\\x5c\\x786e"), join(files, "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{ifeq(if("1", r"\\x6e"), "\\x5c\\x786e", join(files, "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{join(files, ifeq(branch, "default", "\\x5c\\x786e"))}\\n\'' == "fourth\\x6esecond\\x6ethird"
sh % 'hg log -R a -r 8 --template \'{join(files, ifeq(branch, "default", r"\\x5c\\x786e"))}\\n\'' == "fourth\\x5c\\x786esecond\\x5c\\x786ethird"
# Test quotes in nested expression are evaluated just like a $(command)
# substitution in POSIX shells:
sh % 'hg log -R a -r 8 -T \'{"{"{rev}:{node|short}"}"}\\n\'' == "8:209edb6a1848"
sh % 'hg log -R a -r 8 -T \'{"{"\\{{rev}} \\"{node|short}\\""}"}\\n\'' == '{8} "209edb6a1848"'
# Test recursive evaluation:
sh % "hg init r"
sh % "cd r"
sh % "echo a" > "a"
sh % "hg ci -Am '{rev}'" == "adding a"
sh % "hg log -r 0 --template '{if(rev, desc)}\\n'" == "{rev}"
sh % "hg log -r 0 --template '{if(rev, \"{author} {rev}\")}\\n'" == "test 0"
sh % "hg bookmark -q 'text.{rev}'"
sh % "echo aa" >> "aa"
sh % "hg ci -u '{node|short}' -m 'desc to be wrapped desc to be wrapped'"
sh % "hg log -l1 --template '{fill(desc, \"20\", author, bookmarks)}'" == r"""
{node|short}desc to
text.{rev}be wrapped
text.{rev}desc to be
text.{rev}wrapped"""
sh % 'hg log -l1 --template \'{fill(desc, "20", "{node|short}:", "text.{rev}:")}\'' == r"""
ea4c0948489d:desc to
text.1:be wrapped
text.1:desc to be
text.1:wrapped"""
sh % 'hg log -l1 -T \'{fill(desc, date, "", "")}\\n\'' == r"""
hg: parse error: fill expects an integer width
[255]"""
sh % "'COLUMNS=25' hg log -l1 --template '{fill(desc, termwidth, \"{node|short}:\", \"termwidth.{rev}:\")}'" == r"""
ea4c0948489d:desc to be
termwidth.1:wrapped desc
termwidth.1:to be wrapped"""
sh % 'hg log -l 1 --template \'{sub(r"[0-9]", "-", author)}\'' == "{node|short}"
sh % 'hg log -l 1 --template \'{sub(r"[0-9]", "-", "{node|short}")}\'' == "ea-c-------d"
(
sh % "cat"
<< r"""
[extensions]
color=
[color]
mode=ansi
text.{rev} = red
text.1 = green
"""
>> ".hg/hgrc"
)
sh % "hg log '--color=always' -l 1 --template '{label(bookmarks, \"text\\n\")}'" == "\\x1b[0;31mtext\\x1b[0m (esc)"
sh % "hg log '--color=always' -l 1 --template '{label(\"text.{rev}\", \"text\\n\")}'" == "\\x1b[0;32mtext\\x1b[0m (esc)"
# color effect can be specified without quoting:
sh % "hg log '--color=always' -l 1 --template '{label(red, \"text\\n\")}'" == "\\x1b[0;31mtext\\x1b[0m (esc)"
# color effects can be nested (issue5413)
sh % 'hg debugtemplate \'--color=always\' \'{label(red, "red{label(magenta, "ma{label(cyan, "cyan")}{label(yellow, "yellow")}genta")}")}\\n\'' == "\\x1b[0;31mred\\x1b[0;35mma\\x1b[0;36mcyan\\x1b[0m\\x1b[0;31m\\x1b[0;35m\\x1b[0;33myellow\\x1b[0m\\x1b[0;31m\\x1b[0;35mgenta\\x1b[0m (esc)"
# pad() should interact well with color codes (issue5416)
sh % "hg debugtemplate '--color=always' '{pad(label(red, \"red\"), 5, label(cyan, \"-\"))}\\n'" == "\\x1b[0;31mred\\x1b[0m\\x1b[0;36m-\\x1b[0m\\x1b[0;36m-\\x1b[0m (esc)"
# label should be a no-op if color is disabled:
sh % "hg log '--color=never' -l 1 --template '{label(red, \"text\\n\")}'" == "text"
sh % "hg log --config 'extensions.color=!' -l 1 --template '{label(red, \"text\\n\")}'" == "text"
# Test dict constructor:
sh % "hg log -r 0 -T '{dict(y=node|short, x=rev)}\\n'" == "y=f7769ec2ab97 x=0"
sh % "hg log -r 0 -T '{dict(x=rev, y=node|short) % \"{key}={value}\\n\"}'" == r"""
x=0
y=f7769ec2ab97"""
sh % "hg log -r 0 -T '{dict(x=rev, y=node|short)|json}\\n'" == '{"x": 0, "y": "f7769ec2ab97"}'
sh % "hg log -r 0 -T '{dict()|json}\\n'" == "{}"
sh % "hg log -r 0 -T '{dict(rev, node=node|short)}\\n'" == "rev=0 node=f7769ec2ab97"
sh % "hg log -r 0 -T '{dict(rev, node|short)}\\n'" == "rev=0 node=f7769ec2ab97"
sh % "hg log -r 0 -T '{dict(rev, rev=rev)}\\n'" == r"""
hg: parse error: duplicated dict key 'rev' inferred
[255]"""
sh % "hg log -r 0 -T '{dict(node, node|short)}\\n'" == r"""
hg: parse error: duplicated dict key 'node' inferred
[255]"""
sh % "hg log -r 0 -T '{dict(1 + 2)}'" == r"""
hg: parse error: dict key cannot be inferred
[255]"""
sh % "hg log -r 0 -T '{dict(x=rev, x=node)}'" == r"""
hg: parse error: dict got multiple values for keyword argument 'x'
[255]"""
# Test get function:
sh % "hg log -r 0 --template '{get(extras, \"branch\")}\\n'" == "default"
sh % 'hg log -r 0 --template \'{get(extras, "br{"anch"}")}\\n\'' == "default"
sh % "hg log -r 0 --template '{get(files, \"should_fail\")}\\n'" == r"""
hg: parse error: get() expects a dict as first argument
[255]"""
# Test json filter applied to hybrid object:
sh % "hg log -r0 -T '{files|json}\\n'" == '["a"]'
sh % "hg log -r0 -T '{extras|json}\\n'" == '{"branch": "default"}'
# Test localdate(date, tz) function:
# TZ= does not override the global timezone state on Windows.
if os.name != "nt":
oldtz = os.environ.get("TZ")
os.environ["TZ"] = "JST-09"
import time
# tzset() is required for Python 3.6+ to recognize the timezone change.
# https://bugs.python.org/issue30062
time.tzset()
sh % "hg log -r0 -T '{date|localdate|isodate}\\n'" == "1970-01-01 09:00 +0900"
sh % "hg log -r0 -T '{localdate(date, \"UTC\")|isodate}\\n'" == "1970-01-01 00:00 +0000"
sh % "hg log -r0 -T '{localdate(date, \"blahUTC\")|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
sh % "hg log -r0 -T '{localdate(date, \"+0200\")|isodate}\\n'" == "1970-01-01 02:00 +0200"
sh % "hg log -r0 -T '{localdate(date, \"0\")|isodate}\\n'" == "1970-01-01 00:00 +0000"
sh % "hg log -r0 -T '{localdate(date, 0)|isodate}\\n'" == "1970-01-01 00:00 +0000"
if oldtz is not None:
os.environ["TZ"] = oldtz
else:
del os.environ["TZ"]
sh % "hg log -r0 -T '{localdate(date, \"invalid\")|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
sh % "hg log -r0 -T '{localdate(date, date)|isodate}\\n'" == r"""
hg: parse error: localdate expects a timezone
[255]"""
# Test shortest(node) function:
sh % "echo b" > "b"
sh % "hg ci -qAm b"
sh % "hg log --template '{shortest(node)}\\n'" == r"""
21c1
ea4c
f776"""
sh % "hg log --template '{shortest(node, 10)}\\n'" == r"""
21c1b7ca5a
ea4c094848
f7769ec2ab"""
sh % "hg log --template '{node|shortest}\\n' -l1" == "21c1"
sh % 'hg log -r 0 -T \'{shortest(node, "1{"0"}")}\\n\'' == "f7769ec2ab"
sh % "hg log -r 0 -T '{shortest(node, \"not an int\")}\\n'" == r"""
hg: parse error: shortest() expects an integer minlength
[255]"""
sh % "hg log -r 'wdir()' -T '{node|shortest}\\n'" == "ffffffffffffffffffffffffffffffffffffffff"
sh % "cd .."
# Test shortest(node) with the repo having short hash collision:
sh % "hg init hashcollision"
sh % "cd hashcollision"
(
sh % "cat"
<< r"""
[experimental]
evolution.createmarkers=True
"""
>> ".hg/hgrc"
)
sh % "echo 0" > "a"
sh % "hg ci -qAm 0"
for i in [17, 129, 248, 242, 480, 580, 617, 1057, 2857, 4025]:
sh.hg("up", "-q", "0")
open("a", "wb").write(b"%s\n" % pycompat.encodeutf8(str(i)))
sh.hg("ci", "-qm", "%s" % i)
sh % "hg up -q null"
sh % "hg log '-r0:' -T '{rev}:{node}\\n'" == r"""
0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a
1:11424df6dc1dd4ea255eae2b58eaca7831973bbc
2:11407b3f1b9c3e76a79c1ec5373924df096f0499
3:11dd92fe0f39dfdaacdaa5f3997edc533875cfc4
4:10776689e627b465361ad5c296a20a487e153ca4
5:a00be79088084cb3aff086ab799f8790e01a976b
6:a0b0acd79b4498d0052993d35a6a748dd51d13e6
7:a0457b3450b8e1b778f1163b31a435802987fe5d
8:c56256a09cd28e5764f32e8e2810d0f01e2e357a
9:c5623987d205cd6d9d8389bfc40fff9dbb670b48
10:c562ddd9c94164376c20b86b0b4991636a3bf84f"""
sh % "hg debugobsolete a00be79088084cb3aff086ab799f8790e01a976b" == ""
sh % "hg debugobsolete c5623987d205cd6d9d8389bfc40fff9dbb670b48" == ""
sh % "hg debugobsolete c562ddd9c94164376c20b86b0b4991636a3bf84f" == ""
# nodes starting with '11' (we don't have the revision number '11' though)
sh % "hg log -r '1:3' -T '{rev}:{shortest(node, 0)}\\n'" == r"""
1:1142
2:1140
3:11d"""
# '5:a00' is hidden, but still we have two nodes starting with 'a0'
sh % "hg log -r '6:7' -T '{rev}:{shortest(node, 0)}\\n'" == r"""
6:a0b
7:a04"""
# node '10' conflicts with the revision number '10' even if it is hidden
# (we could exclude hidden revision numbers, but currently we don't)
sh % "hg log -r 4 -T '{rev}:{shortest(node, 0)}\\n'" == "4:107"
sh % "hg log -r 4 -T '{rev}:{shortest(node, 0)}\\n' --hidden" == "4:107"
# node 'c562' should be unique if the other 'c562' nodes are hidden
# (but we don't try the slow path to filter out hidden nodes for now)
sh % "hg log -r 8 -T '{rev}:{node|shortest}\\n'" == "8:c5625"
sh % "hg log -r '8:10' -T '{rev}:{node|shortest}\\n' --hidden" == r"""
8:c5625
9:c5623
10:c562d"""
sh % "cd .."
# Test pad function
sh % "cd r"
sh % "hg log --template '{pad(rev, 20)} {author|user}\\n'" == r"""
2 test
1 {node|short}
0 test"""
sh % "hg log --template '{pad(rev, 20, \" \", True)} {author|user}\\n'" == r"""
2 test
1 {node|short}
0 test"""
sh % "hg log --template '{pad(rev, 20, \"-\", False)} {author|user}\\n'" == r"""
2------------------- test
1------------------- {node|short}
0------------------- test"""
# Test unicode fillchar
sh % pycompat.decodeutf8(
b"'HGENCODING=utf-8' hg log -r 0 -T '{pad(\"hello\", 10, \"\xe2\x98\x83\")}world\\n'"
) == pycompat.decodeutf8(
b"hello\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83world"
)
# Test template string in pad function
sh % "hg log -r 0 -T '{pad(\"\\{{rev}}\", 10)} {author|user}\\n'" == "{0} test"
sh % "hg log -r 0 -T '{pad(r\"\\{rev}\", 10)} {author|user}\\n'" == "\\{rev} test"
# Test width argument passed to pad function
sh % 'hg log -r 0 -T \'{pad(rev, "1{"0"}")} {author|user}\\n\'' == "0 test"
sh % "hg log -r 0 -T '{pad(rev, \"not an int\")}\\n'" == r"""
hg: parse error: pad() expects an integer width
[255]"""
# Test invalid fillchar passed to pad function
sh % "hg log -r 0 -T '{pad(rev, 10, \"\")}\\n'" == r"""
hg: parse error: pad() expects a single fill character
[255]"""
sh % "hg log -r 0 -T '{pad(rev, 10, \"--\")}\\n'" == r"""
hg: parse error: pad() expects a single fill character
[255]"""
# Test boolean argument passed to pad function
# no crash
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "f{"oo"}")}\\n\'' == "---------0"
# string/literal
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "false")}\\n\'' == "---------0"
sh % "hg log -r 0 -T '{pad(rev, 10, \"-\", false)}\\n'" == "0---------"
sh % 'hg log -r 0 -T \'{pad(rev, 10, "-", "")}\\n\'' == "0---------"
# unknown keyword is evaluated to ''
sh % "hg log -r 0 -T '{pad(rev, 10, \"-\", unknownkeyword)}\\n'" == "0---------"
# Test separate function
sh % 'hg log -r 0 -T \'{separate("-", "", "a", "b", "", "", "c", "")}\\n\'' == "a-b-c"
sh % 'hg log -r 0 -T \'{separate(" ", "{rev}:{node|short}", author|user, bookmarks)}\\n\'' == "0:f7769ec2ab97 test"
sh % 'hg log -r 0 \'--color=always\' -T \'{separate(" ", "a", label(red, "b"), "c", label(red, ""), "d")}\\n\'' == "a \\x1b[0;31mb\\x1b[0m c d (esc)"
# Test boolean expression/literal passed to if function
sh % "hg log -r 0 -T '{if(rev, \"rev 0 is True\")}\\n'" == "rev 0 is True"
sh % "hg log -r 0 -T '{if(0, \"literal 0 is True as well\")}\\n'" == "literal 0 is True as well"
sh % 'hg log -r 0 -T \'{if("", "", "empty string is False")}\\n\'' == "empty string is False"
sh % 'hg log -r 0 -T \'{if(revset(r"0 - 0"), "", "empty list is False")}\\n\'' == "empty list is False"
sh % "hg log -r 0 -T '{if(true, \"true is True\")}\\n'" == "true is True"
sh % 'hg log -r 0 -T \'{if(false, "", "false is False")}\\n\'' == "false is False"
sh % 'hg log -r 0 -T \'{if("false", "non-empty string is True")}\\n\'' == "non-empty string is True"
# Test ifcontains function
sh % 'hg log --template \'{rev} {ifcontains(rev, "2 two 0", "is in the string", "is not")}\\n\'' == r"""
2 is in the string
1 is not
0 is in the string"""
sh % 'hg log -T \'{rev} {ifcontains(rev, "2 two{" 0"}", "is in the string", "is not")}\\n\'' == r"""
2 is in the string
1 is not
0 is in the string"""
sh % 'hg log --template \'{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\\n\'' == r"""
2 did not add a
1 did not add a
0 added a"""
sh % "hg log --debug -T '{rev}{ifcontains(1, parents, \" is parent of 1\")}\\n'" == r"""
2 is parent of 1
1
0"""
# Test revset function
sh % 'hg log --template \'{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\\n\'' == r"""
2 current rev
1 not current rev
0 not current rev"""
sh % 'hg log --template \'{rev} {ifcontains(rev, revset(". + .^"), "match rev", "not match rev")}\\n\'' == r"""
2 match rev
1 match rev
0 not match rev"""
sh % 'hg log -T \'{ifcontains(desc, revset(":"), "", "type not match")}\\n\' -l1' == "type not match"
sh % "hg log --template '{rev} Parents: {revset(\"parents(%s)\", rev)}\\n'" == r"""
2 Parents: 1
1 Parents: 0
0 Parents:"""
(
sh % "cat"
<< r"""
[revsetalias]
myparents(x) = parents(x)
"""
>> ".hg/hgrc"
)
sh % "hg log --template '{rev} Parents: {revset(\"myparents(%s)\", rev)}\\n'" == r"""
2 Parents: 1
1 Parents: 0
0 Parents:"""
sh % 'hg log --template \'Rev: {rev}\\n{revset("::%s", rev) % "Ancestor: {revision}\\n"}\\n\'' == r"""
Rev: 2
Ancestor: 0
Ancestor: 1
Ancestor: 2
Rev: 1
Ancestor: 0
Ancestor: 1
Rev: 0
Ancestor: 0"""
sh % "hg log --template '{revset(\"TIP\"|lower)}\\n' -l1" == "2"
sh % 'hg log -T \'{revset("%s", "t{"ip"}")}\\n\' -l1' == "2"
# a list template is evaluated for each item of revset/parents
sh % 'hg log -T \'{rev} p: {revset("p1(%s)", rev) % "{rev}:{node|short}"}\\n\'' == r"""
2 p: 1:ea4c0948489d
1 p: 0:f7769ec2ab97
0 p:"""
sh % "hg log --debug -T '{rev} p:{parents % \" {rev}:{node|short}\"}\\n'" == r"""
2 p: 1:ea4c0948489d
1 p: 0:f7769ec2ab97
0 p: -1:000000000000"""
# therefore, 'revcache' should be recreated for each rev
sh % 'hg log -T \'{rev} {file_adds}\\np {revset("p1(%s)", rev) % "{file_adds}"}\\n\'' == r"""
2 aa b
p (trailing space)
1 (trailing space)
p a
0 a
p"""
sh % "hg log --debug -T '{rev} {file_adds}\\np {parents % \"{file_adds}\"}\\n'" == r"""
2 aa b
p (trailing space)
1 (trailing space)
p a
0 a
p"""
# a revset item must be evaluated as an integer revision, not an offset from tip
sh % 'hg log -l 1 -T \'{revset("null") % "{rev}:{node|short}"}\\n\'' == "-1:000000000000"
sh % 'hg log -l 1 -T \'{revset("%s", "null") % "{rev}:{node|short}"}\\n\'' == "-1:000000000000"
# join() should pick '{rev}' from revset items:
sh % 'hg log -R ../a -T \'{join(revset("parents(%d)", rev), ", ")}\\n\' -r6' == "4, 5"
# on the other hand, parents are formatted as '{rev}:{node|formatnode}' by
# default. join() should agree with the default formatting:
sh % "hg log -R ../a -T '{join(parents, \", \")}\\n' -r6" == "13207e5a10d9, 07fa1db10648"
sh % "hg log -R ../a -T '{join(parents, \",\\n\")}\\n' -r6 --debug" == r"""
13207e5a10d9fd28ec424934298e176197f2c67f,
07fa1db1064879a32157227401eb44b322ae53ce"""
# Test files function
sh % "hg log -T '{rev}\\n{join(files('\\''*'\\''), '\\''\\n'\\'')}\\n'" == r"""
2
a
aa
b
1
a
0
a"""
sh % "hg log -T '{rev}\\n{join(files('\\''aa'\\''), '\\''\\n'\\'')}\\n'" == r"""
2
aa
1
0"""
# Test relpath function
sh % "hg log -r0 -T '{files % \"{file|relpath}\\n\"}'" == "a"
sh % "cd .."
sh % "hg log -R r -r0 -T '{files % \"{file|relpath}\\n\"}'" == "r/a"
sh % "cd r"
# Test active bookmark templating
sh % "hg book foo"
sh % "hg book bar"
sh % "hg log --template '{rev} {bookmarks % '\\''{bookmark}{ifeq(bookmark, active, \"*\")} '\\''}\\n'" == r"""
2 bar* foo text.{rev} (trailing space)
1 (trailing space)
0"""
sh % "hg log --template '{rev} {activebookmark}\\n'" == r"""
2 bar
1 (trailing space)
0"""
sh % "hg bookmarks --inactive bar"
sh % "hg log --template '{rev} {activebookmark}\\n'" == r"""
2 (trailing space)
1 (trailing space)
0"""
sh % "hg book -r1 baz"
sh % "hg log --template '{rev} {join(bookmarks, '\\'' '\\'')}\\n'" == r"""
2 bar foo text.{rev}
1 baz
0"""
sh % "hg log --template '{rev} {ifcontains('\\''foo'\\'', bookmarks, '\\''t'\\'', '\\''f'\\'')}\\n'" == r"""
2 t
1 f
0 f"""
# Test namespaces dict
sh % 'hg --config "extensions.revnamesext=$TESTDIR/revnamesext.py" log -T \'{rev}\\n{namespaces % " {namespace} color={colorname} builtin={builtin}\\n {join(names, ",")}\\n"}\\n\'' == r"""
2
bookmarks color=bookmark builtin=True
bar,foo,text.{rev}
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r2
1
bookmarks color=bookmark builtin=True
baz
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r1
0
bookmarks color=bookmark builtin=True
branches color=branch builtin=True
default
remotebookmarks color=remotebookmark builtin=True
revnames color=revname builtin=False
r0"""
# revert side effect of loading the revnames extension
del namespaces.namespacetable["revnames"]
sh % "hg log -r2 -T '{namespaces % \"{namespace}: {names}\\n\"}'" == r"""
bookmarks: bar foo text.{rev}
branches: default
remotebookmarks:"""
sh % 'hg log -r2 -T \'{namespaces % "{namespace}:\\n{names % " {name}\\n"}"}\'' == r"""
bookmarks:
bar
foo
text.{rev}
branches:
default
remotebookmarks:"""
sh % 'hg log -r2 -T \'{get(namespaces, "bookmarks") % "{name}\\n"}\'' == r"""
bar
foo
text.{rev}"""
sh % "hg log -r2 -T '{namespaces.bookmarks % \"{bookmark}\\n\"}'" == r"""
bar
foo
text.{rev}"""
# Test stringify on sub expressions
sh % "cd .."
sh % 'hg log -R a -r 8 --template \'{join(files, if("1", if("1", ", ")))}\\n\'' == "fourth, second, third"
sh % 'hg log -R a -r 8 --template \'{strip(if("1", if("1", "-abc-")), if("1", if("1", "-")))}\\n\'' == "abc"
# Test splitlines
sh % "hg log -Gv -R a --template '{splitlines(desc) % '\\''foo {line}\\n'\\''}'" == r"""
@ foo Modify, add, remove, rename
│
o foo future
│
o foo third
│
o foo second
o foo merge
├─╮
│ o foo new head
│ │
o │ foo new branch
├─╯
o foo no user, no domain
│
o foo no person
│
o foo other 1
│ foo other 2
│ foo
│ foo other 3
o foo line 1
foo line 2"""
sh % "hg log -R a -r0 -T '{desc|splitlines}\\n'" == "line 1 line 2"
sh % "hg log -R a -r0 -T '{join(desc|splitlines, \"|\")}\\n'" == "line 1|line 2"
# Test startswith
sh % "hg log -Gv -R a --template '{startswith(desc)}'" == r"""
hg: parse error: startswith expects two arguments
[255]"""
sh % "hg log -Gv -R a --template '{startswith('\\''line'\\'', desc)}'" == r"""
@
│
o
│
o
│
o
o
├─╮
│ o
│ │
o │
├─╯
o
│
o
│
o
│
o line 1
line 2"""
# Test bad template with better error message
sh % "hg log -Gv -R a --template '{desc|user()}'" == r"""
hg: parse error: expected a symbol, got 'func'
[255]"""
# Test word function (including index out of bounds graceful failure)
sh % "hg log -Gv -R a --template '{word('\\''1'\\'', desc)}'" == r"""
@ add,
│
o
│
o
│
o
o
├─╮
│ o head
│ │
o │ branch
├─╯
o user,
│
o person
│
o 1
│
o 1"""
# Test word third parameter used as splitter
sh % "hg log -Gv -R a --template '{word('\\''0'\\'', desc, '\\''o'\\'')}'" == r"""
@ M
│
o future
│
o third
│
o sec
o merge
├─╮
│ o new head
│ │
o │ new branch
├─╯
o n
│
o n
│
o
│
o line 1
line 2"""
# Test word error messages for not enough and too many arguments
sh % "hg log -Gv -R a --template '{word('\\''0'\\'')}'" == r"""
hg: parse error: word expects two or three arguments, got 1
[255]"""
sh % "hg log -Gv -R a --template '{word('\\''0'\\'', desc, '\\''o'\\'', '\\''h'\\'', '\\''b'\\'', '\\''o'\\'', '\\''y'\\'')}'" == r"""
hg: parse error: word expects two or three arguments, got 7
[255]"""
# Test word for integer literal
sh % "hg log -R a --template '{word(2, desc)}\\n' -r0" == "line"
# Test word for invalid numbers
sh % "hg log -Gv -R a --template '{word('\\''a'\\'', desc)}'" == r"""
hg: parse error: word expects an integer index
[255]"""
# Test word for out of range
sh % "hg log -R a --template '{word(10000, desc)}'"
sh % "hg log -R a --template '{word(-10000, desc)}'"
# Test indent and not adding to empty lines
sh % "hg log -T '-----\\n{indent(desc, '\\''.. '\\'', '\\'' . '\\'')}\\n' -r '0:1' -R a" == r"""
-----
. line 1
.. line 2
-----
. other 1
.. other 2
.. other 3"""
# Test with non-strings like dates
sh % "hg log -T '{indent(date, '\\'' '\\'')}\\n' -r '2:3' -R a" == r"""
1200000.00
1300000.00"""
# Test broken string escapes:
sh % "hg log -T 'bogus\\' -R a" == r"""
hg: parse error: trailing \ in string
[255]"""
sh % pycompat.decodeutf8(
b"hg log -T '\\xy' -R a"
) == r"""
hg: parse error: invalid \x escape* (glob)
[255]"""
# Templater supports aliases of symbol and func() styles:
sh % "hg clone -q a aliases"
sh % "cd aliases"
(
sh % "cat"
<< r"""
[templatealias]
r = rev
rn = "{r}:{node|short}"
status(c, files) = files % "{c} {file}\n"
utcdate(d) = localdate(d, "UTC")
"""
>> ".hg/hgrc"
)
sh % "hg debugtemplate -vr0 '{rn} {utcdate(date)|isodate}\\n'" == r"""
(template
(symbol 'rn')
(string ' ')
(|
(func
(symbol 'utcdate')
(symbol 'date'))
(symbol 'isodate'))
(string '\n'))
* expanded:
(template
(template
(symbol 'rev')
(string ':')
(|
(symbol 'node')
(symbol 'short')))
(string ' ')
(|
(func
(symbol 'localdate')
(list
(symbol 'date')
(string 'UTC')))
(symbol 'isodate'))
(string '\n'))
0:1e4e1b8f71e0 1970-01-12 13:46 +0000"""
sh % "hg debugtemplate -vr0 '{status(\"A\", file_adds)}'" == r"""
(template
(func
(symbol 'status')
(list
(string 'A')
(symbol 'file_adds'))))
* expanded:
(template
(%
(symbol 'file_adds')
(template
(string 'A')
(string ' ')
(symbol 'file')
(string '\n'))))
A a"""
# A unary function alias can be called as a filter:
sh % "hg debugtemplate -vr0 '{date|utcdate|isodate}\\n'" == r"""
(template
(|
(|
(symbol 'date')
(symbol 'utcdate'))
(symbol 'isodate'))
(string '\n'))
* expanded:
(template
(|
(func
(symbol 'localdate')
(list
(symbol 'date')
(string 'UTC')))
(symbol 'isodate'))
(string '\n'))
1970-01-12 13:46 +0000"""
# Aliases should be applied only to command arguments and templates in hgrc.
# Otherwise, our stock styles and web templates could be corrupted:
sh % "hg log -r0 -T '{rn} {utcdate(date)|isodate}\\n'" == "0:1e4e1b8f71e0 1970-01-12 13:46 +0000"
sh % "hg log -r0 --config 'ui.logtemplate=\"{rn} {utcdate(date)|isodate}\\n\"'" == "0:1e4e1b8f71e0 1970-01-12 13:46 +0000"
sh % "cat" << r"""
changeset = 'nothing expanded:{rn}\n'
""" > "tmpl"
sh % "hg log -r0 --style ./tmpl" == "nothing expanded:"
# Aliases in formatter:
sh % "hg bookmarks -T '{pad(bookmark, 7)} {rn}\\n'" == "foo :07fa1db10648"
# Aliases should honor HGPLAIN:
if os.name != "nt":
    # Environment override does not work well across the Python/Rust boundary on
    # Windows. A solution will be changing the config parser to take an environ
    # instead of using hardcoded system env.
sh % "'HGPLAIN=' hg log -r0 -T 'nothing expanded:{rn}\\n'" == "nothing expanded:"
sh % "'HGPLAINEXCEPT=templatealias' hg log -r0 -T '{rn}\\n'" == "0:1e4e1b8f71e0"
# Unparsable alias:
sh % "hg debugtemplate --config 'templatealias.bad=x(' -v '{bad}'" == r"""
(template
(symbol 'bad'))
abort: bad definition of template alias "bad": at 2: not a prefix: end
[255]"""
sh % "hg log --config 'templatealias.bad=x(' -T '{bad}'" == r"""
abort: bad definition of template alias "bad": at 2: not a prefix: end
[255]"""
sh % "cd .."
# Set up repository for non-ascii encoding tests:
sh % "hg init nonascii"
sh % "cd nonascii"
utf8 = "\u00e9" # == "é"
open("utf-8", "wb").write(pycompat.encodeutf8(utf8))
sh % ("hg bookmark -q '%s'" % utf8)
sh % ("hg ci -qAm 'non-ascii branch: %s' utf-8" % utf8)
# json filter should try round-trip conversion to utf-8:
# Mercurial's json encoding works a little differently in Python 2 and 3 since
# it escapes bytes differently from unicode strings. Let's set the tests to test
# the long term vision of pure unicode.
import sys
if sys.version_info[0] >= 3:
sh % "hg log -T '{bookmarks|json}\\n' -r0" == '["\\u00e9"]'
sh % "hg log -T '{desc|json}\\n' -r0" == '"non-ascii branch: \\u00e9"'
# json filter takes input as utf-8b:
sh % ("hg log -T '{'\\''%s'\\''|json}\\n' -l1" % utf8) == '"\\u00e9"'
# pad width:
sh % (
"hg debugtemplate '{pad('\\''%s'\\'', 2, '\\''-'\\'')}\\n'" % utf8
) == "\u00e9- (esc)"
sh % "cd .."
# Test that template function in extension is registered as expected
sh % "cd a"
sh % "cat" << r"""
from edenscm.mercurial import registrar
templatefunc = registrar.templatefunc()
@templatefunc('custom()')
def custom(context, mapping, args):
return 'custom'
""" > "$TESTTMP/customfunc.py"
sh % "cat" << r"""
[extensions]
customfunc = $TESTTMP/customfunc.py
""" > ".hg/hgrc"
sh % "hg log -r . -T '{custom()}\\n' --config 'customfunc.enabled=true'" == "custom"
sh % "cd .."
# Test 'graphwidth' in 'hg log' on various topologies. The key here is that the
# printed graphwidths 3, 5, 7, etc. should all line up in their respective
# columns. We don't care about other aspects of the graph rendering here.
sh % "hg init graphwidth"
sh % "cd graphwidth"
sh % "'wrappabletext=a a a a a a a a a a a a'"
sh % "printf 'first\\n'" > "file"
sh % "hg add file"
sh % 'hg commit -m "$wrappabletext"'
sh % "printf 'first\\nsecond\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg checkout 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "printf 'third\\nfirst\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg merge" == r"""
merging file
0 files updated, 1 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
│ @ 5
├─╯
o 3"""
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 5
├─╮
│ o 5
│ │
o │ 5
├─╯
o 3"""
sh % "hg checkout 0" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "printf 'third\\nfirst\\nsecond\\n'" > "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
│ o 7
│ ├─╮
│ │ o 7
├───╯
│ o 5
├─╯
o 3"""
sh % "hg log --graph -T '{graphwidth}' -r 3" == r"""
o 5
├─╮
│ │
~ ~"""
sh % "hg log --graph -T '{graphwidth}' -r 1" == r"""
o 3
│
~"""
sh % "hg merge" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % 'hg commit -m "$wrappabletext"'
sh % "printf 'seventh\\n'" >> "file"
sh % 'hg commit -m "$wrappabletext"'
sh % "hg log --graph -T '{graphwidth}'" == r"""
@ 3
│
o 5
├─╮
│ o 5
│ │
o │ 7
├───╮
│ │ o 7
│ ├─╯
o │ 5
├─╯
o 3"""
# The point of graphwidth is to allow wrapping that accounts for the space taken
# by the graph.
sh % "'COLUMNS=10' hg log --graph -T '{fill(desc, termwidth - graphwidth)}'" == r"""
@ a a a a
│ a a a a
│ a a a a
o a a a
├─╮ a a a
│ │ a a a
│ │ a a a
│ o a a a
│ │ a a a
│ │ a a a
│ │ a a a
o │ a a
├───╮ a a
│ │ │ a a
│ │ │ a a
│ │ │ a a
│ │ │ a a
│ │ o a a
│ ├─╯ a a
│ │ a a
│ │ a a
│ │ a a
│ │ a a
o │ a a a
├─╯ a a a
│ a a a
│ a a a
o a a a a
a a a a
a a a a"""
# Something tricky happens when there are elided nodes; the next drawn row of
# edges can be more than one column wider, but the graph width only increases by
# one column. The remaining columns are added in between the nodes.
sh % "hg log --graph -T '{graphwidth}' -r '0|2|4|5'" == r"""
o 7
├─┬─╮
o ╷ ╷ 7
├─╯ ╷
│ o 7
├───╯
o 3"""
sh % "cd .."
# Confirm that truncation does the right thing
sh % "hg debugtemplate '{truncatelonglines(\"abcdefghijklmnopqrst\\n\", 10)}'" == "abcdefghij"
sh % pycompat.decodeutf8(
b'hg debugtemplate \'{truncatelonglines("abcdefghijklmnopqrst\\n", 10, "\xe2\x80\xa6")}\''
) == pycompat.decodeutf8(b"abcdefghi\xe2\x80\xa6 (esc)")
sh % "hg debugtemplate '{truncate(\"a\\nb\\nc\\n\", 2)}'" == r"""
a
b"""
sh % 'hg debugtemplate \'{truncate("a\\nb\\nc\\n", 2, "truncated\\n")}\'' == r"""
a
truncated"""
# Test case expressions
sh % "hg debugtemplate \"{case('a', 'a', 'A', 'b', 'B', 'c', 'C')}\"" == "A"
sh % "hg debugtemplate \"{case('b', 'a', 'A', 'b', 'B', 'c', 'C', 'D')}\"" == "B"
sh % "hg debugtemplate \"{case('x', 'a', 'A', 'b', 'B', 'c', 'C')}\"" == ""
sh % "hg debugtemplate \"{case('x', 'a', 'A', 'b', 'B', 'c', 'C', 'D')}\"" == "D"
| gpl-2.0 | -8,763,705,720,192,593,000 | 28.222463 | 604 | 0.572474 | false |
evolaemp/northeuralex_website | northeuralex/datatables.py | 1 | 6527 | from clld.db.meta import DBSession
from clld.web.datatables.base import Col, IntegerIdCol, LinkToMapCol, LinkCol
from clld.web.util.helpers import external_link, link, map_marker_img
from clld.web.util.htmllib import HTML
from clld.web import datatables
from northeuralex.models import Concept, Doculect, Word
"""
Columns
"""
class IsoCodeCol(Col):
"""
Custom column to set a proper title for the iso_code column of the
languages table.
"""
__kw__ = {'sTitle': 'ISO 639-3'}
class GlottoCodeCol(Col):
"""
Custom column to present the glotto_code column of the languages table as a
link to the respective languoid in Glottolog.
"""
__kw__ = {'sTitle': 'Glottocode'}
def format(self, doculect):
href = 'http://glottolog.org/resource/languoid/id/{}'.format(doculect.glotto_code)
return external_link(href, doculect.glotto_code)
class FamilyCol(Col):
"""
Custom column to replace the search with a drop-down and to add icons for
the family column of the languages table.
Unlike in, e.g., NextStepCol, the choices have to be set in the constructor
because otherwise the unit tests do not work.
The icons are handled in the format method, the code being stolen from the
datatable module of the clld-glottologfamily-plugin repo.
"""
def __init__(self, *args, **kwargs):
kwargs['choices'] = sorted([
x[0] for x in DBSession.query(Doculect.family).distinct()])
super().__init__(*args, **kwargs)
def format(self, doculect):
return HTML.div(map_marker_img(self.dt.req, doculect), ' ', doculect.family)
class SubfamilyCol(Col):
"""
Custom column to replace the search with a drop-down for the subfamily
column of the languages table.
Unlike in, e.g., NextStepCol, the choices have to be set in the constructor
because otherwise the unit tests do not work.
"""
def __init__(self, *args, **kwargs):
kwargs['choices'] = sorted([
x[0] for x in DBSession.query(Doculect.subfamily).distinct()])
super().__init__(*args, **kwargs)
class ConcepticonCol(Col):
"""
Custom column to present the concepticon_name column of the concepts table
as a link to the respective concept in the Concepticon.
"""
__kw__ = {'sTitle': 'Concepticon'}
def format(self, concept):
if concept.concepticon_id:
href = 'http://concepticon.clld.org/parameters/{}'.format(concept.concepticon_id)
return external_link(href, concept.concepticon_name)
else:
return ''
class ConceptLinkCol(LinkCol):
"""
Custom column to present the concept column of the words table as a link
with a title attribute containing the concept's English name.
"""
def format(self, item):
concept = self.get_obj(item)
if concept:
return link(self.dt.req, concept, **{'title': concept.english_name})
else:
return ''
class DoculectLinkCol(LinkCol):
"""
Custom column to present the doculect column of the words table as a link
with a title attribute containing the doculect's family and subfamily.
"""
def format(self, item):
doculect = self.get_obj(item)
if doculect:
title = '{} ({}, {})'.format(doculect.name,
doculect.family, doculect.subfamily)
return link(self.dt.req, doculect, **{'title': title})
else:
return ''
class NextStepCol(Col):
"""
Custom column to replace the search with a drop-down for the next_step
column of the words table. Also provides help info in the column's header.
"""
__kw__ = {
'sTitle': (
'<abbr title="'
'process → review → validate'
'">Next action</abbr>'),
'choices': [('validate', 'validate'),
('review', 'review'),
('process', 'process')] }
"""
Tables
"""
class LanguagesDataTable(datatables.Languages):
def col_defs(self):
return [
LinkToMapCol(self, 'm'),
LinkCol(self, 'name'),
GlottoCodeCol(self, 'glotto_code', model_col=Doculect.glotto_code),
IsoCodeCol(self, 'iso_code', model_col=Doculect.iso_code),
FamilyCol(self, 'family', model_col=Doculect.family),
SubfamilyCol(self, 'subfamily', model_col=Doculect.subfamily),
Col(self, 'latitude'),
Col(self, 'longitude') ]
class ConceptsDataTable(datatables.Parameters):
def col_defs(self):
return [
IntegerIdCol(self, 'id', model_col=Concept.id),
LinkCol(self, 'name'),
Col(self, 'english', model_col=Concept.english_name),
Col(self, 'german', model_col=Concept.german_name),
Col(self, 'russian', model_col=Concept.russian_name),
ConcepticonCol(self, 'concepticon', model_col=Concept.concepticon_name) ]
class WordsDataTable(datatables.Values):
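    # The column layout depends on context: a table scoped to a language lists
    # concepts, while a table scoped to a concept (parameter) lists doculects;
    # the orthographic form, IPA and next-action columns are always appended.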
def col_defs(self):
res = []
if self.language:
res.extend([
IntegerIdCol(self, 'id', model_col=Concept.id,
get_object=lambda x: x.valueset.parameter),
ConceptLinkCol(self, 'concept', model_col=Concept.name,
get_object=lambda x: x.valueset.parameter) ])
elif self.parameter:
res.extend([
DoculectLinkCol(self, 'language', model_col=Doculect.name,
get_object=lambda x: x.valueset.language) ])
res.extend([
Col(self, 'form', model_col=Word.name, sTitle='Orthographic form'),
Col(self, 'raw_ipa', model_col=Word.raw_ipa, sTitle='Automatically generated IPA'),
# Col(self, 'norm_ipa', model_col=Word.norm_ipa, sTitle='Normalised IPA'),
NextStepCol(self, 'next_step', model_col=Word.next_step) ])
return res
class SourcesDataTable(datatables.Sources):
def col_defs(self):
return super().col_defs()[:-1]
"""
Hooks
"""
def includeme(config):
"""
Magical (not in the good sense) hook that replaces the default data tables
with the custom ones defined in this module.
"""
config.register_datatable('languages', LanguagesDataTable)
config.register_datatable('parameters', ConceptsDataTable)
config.register_datatable('values', WordsDataTable)
config.register_datatable('sources', SourcesDataTable)
| mit | -2,206,149,159,916,614,000 | 28.251121 | 95 | 0.619807 | false |
opena11y/fae2 | fae2/populate/pop_wcag20.py | 1 | 10340 | """
Copyright 2014-2016 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: populate/pop_wcag20.py
Author: Jon Gunderson
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import django
from django.core.exceptions import ObjectDoesNotExist
fp = os.path.realpath(__file__)
path, filename = os.path.split(fp)
fae2_path = path.split('/populate')[0]
sys.path.append(fae2_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fae2.settings')
from django.conf import settings
django.setup()
from wcag20.models import Principle, Guideline, SuccessCriterion
"""This file is for populating the database with WCAG 2.0 References"""
# Principle.objects.all().delete()
# Guideline.objects.all().delete()
# SuccessCriterion.objects.all().delete()
def create_wcag20(wcag20):
print("wcag 2.0")
for principle in wcag20:
principle_url = 'http://www.w3.org/TR/WCAG20/#' + principle[2]
try:
wcag20_principle = Principle.objects.get(num=principle[0])
print(" " + wcag20_principle.title + " (found)")
wcag20_principle.title = principle[1]
wcag20_principle.url = principle_url
print(principle[1] + " (updated) " + principle[0])
except:
wcag20_principle = Principle(num=principle[0], title=principle[1], url=principle_url)
print(principle[1] + " (CREATED)")
wcag20_principle.save()
for guideline in principle[3]:
guideline_url = 'http://www.w3.org/TR/WCAG20/#' + guideline[2]
guideline_slug = 'p' + principle[0] + 'g' + str(guideline[0])
try:
wcag20_guideline = Guideline.objects.get(principle=wcag20_principle, num=guideline[0])
print(" " + wcag20_guideline.title + " (found)")
wcag20_guideline.title = guideline[1]
wcag20_guideline.url = guideline_url
wcag20_guideline.slug = guideline_slug
print(" " + guideline[1] + " (updated)")
except:
wcag20_guideline = Guideline(principle=wcag20_principle, num=guideline[0], title=guideline[1], url=guideline_url, slug=guideline_slug)
print(" " + guideline[1] + " (CREATED)")
wcag20_guideline.save()
for requirement in guideline[3]:
requirement_url = 'http://www.w3.org/TR/WCAG20/#' + requirement[2]
meet_url = 'http://www.w3.org/WAI/WCAG20/quickref/#qr-' + requirement[2] + '.html'
understand_url = 'http://www.w3.org/TR/WCAG20/' + requirement[2] + '.html'
requirement_slug = guideline_slug + 'sc' + str(requirement[0])
try:
wcag20_requirement = SuccessCriterion.objects.get(guideline=wcag20_guideline, num=requirement[0])
print(" " + wcag20_requirement.title + " (found)")
wcag20_requirement.title = requirement[1]
wcag20_requirement.url = requirement_url
wcag20_requirement.url_meet = meet_url
wcag20_requirement.url_understand = understand_url
wcag20_requirement.level = requirement[3]
wcag20_requirement.slug = requirement_slug
print(" " + requirement[1] + " (updated)")
except:
wcag20_requirement = SuccessCriterion(guideline=wcag20_guideline, num=requirement[0], title=requirement[1], url=requirement_url, url_meet=meet_url, url_understand=understand_url, level=requirement[3], slug=requirement_slug)
print(" " + requirement[1] + " (CREATED)")
wcag20_requirement.save()
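# Nested tuple describing WCAG 2.0: each principle is
# (number, title, URL fragment, guidelines), each guideline is
# (number, title, URL fragment, success criteria), and each success criterion
# is (number, title, URL fragment, conformance level 1-3).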
wcag20 = (
('1', 'Perceivable - Information and user interface components must be presentable to users in ways they can perceive.', 'perceivable',
(
('1', 'Text Alternatives', 'text-equiv',
(
('1', 'Non-text Content', 'text-equiv', '1',),
),
),
('2', 'Time-based Media', 'media-equiv',
(
('1', 'Audio-only and Video-only (Prerecorded)', 'media-equiv-av-only-alt','1',),
('2', 'Captions (Prerecorded)', 'media-equiv-captions','1',),
('3', 'Audio Description or Media Alternative (Prerecorded)', 'media-equiv-audio-desc','1',),
('4', 'Captions (Live)', 'media-equiv-real-time-captions','2',),
('5', 'Audio Description (Prerecorded)', 'media-equiv-audio-desc-only','2',),
('6', 'Sign Language (Prerecorded)', 'media-equiv-sign','3',),
('7', 'Extended Audio Description (Prerecorded)', 'media-equiv-extended-ad','3',),
('8', 'Media Alternative (Prerecorded)', 'media-equiv-text-doc','3',),
('9', 'Audio-only (Live)', 'media-equiv-live-audio-only','3',),
),
),
('3', 'Adaptable', 'content-structure-separation',
(
('1', 'Info and Relationships', 'content-structure-separation-programmatic','1',),
('2', 'Meaningful Sequence', 'content-structure-separation-sequenc','1',),
('3', 'Sensory Characteristics', 'content-structure-separation-understanding','1',),
),
),
('4', 'Distinguishable', 'visual-audio-contrast',
(
('1', 'Use of Color', 'visual-audio-contrast-without-color','1',),
('2', 'Audio Control', 'visual-audio-contrast-dis-audio','1',),
('3', 'Contrast (Minimum)', 'visual-audio-contrast-contrast','2',),
('4', 'Resize text', 'visual-audio-contrast-scale','2',),
('5', 'Images of Text', 'visual-audio-contrast-text-presentation','2',),
('6', 'Contrast (Enhanced)', 'visual-audio-contrast7','3',),
('7', 'Low or No Background Audio', 'visual-audio-contrast-noaudio','3',),
('8', 'Visual Presentation', 'visual-audio-contrast-visual-presentation','3',),
('9', 'Images of Text (No Exception)', 'visual-audio-contrast-text-images','3',),
),
),
),
),
('2', 'Operable - User interface components and navigation must be operable.', 'perceivable',
(
('1', 'Keyboard Accessible', 'keyboard-operation',
(
('1', 'Keyboard', 'keyboard-operation-keyboard-operable', '1',),
('2', 'No Keyboard Trap', 'keyboard-operation-trapping', '1',),
('3', 'Keyboard (No Exception)', 'keyboard-operation-all-funcs', '3',),
),
),
('2', 'Enough Time', '',
(
('1', 'Timing Adjustable', 'time-limits-required-behaviors', '1',),
('2', 'Pause, Stop, Hide', 'time-limits-pause', '1',),
('3', 'No Timing', 'time-limits-no-exceptions', '3',),
('4', 'Interruptions', 'time-limits-postponed', '3',),
('5', 'Re-authenticating', 'time-limits-server-timeout', '3',),
),
),
('3', 'Seizures', 'seizure',
(
('1', 'Three Flashes or Below Threshold', 'seizure-does-not-violate', '1',),
('2', 'Three Flashes', 'seizure-three-times', '3',),
),
),
('4', 'Navigable', 'navigation-mechanisms',
(
('1', 'Bypass Blocks', 'navigation-mechanisms-skip', '1',),
('2', 'Page Titled', 'navigation-mechanisms-title', '1',),
('3', 'Focus Order', 'navigation-mechanisms-focus-order', '1',),
('4', 'Link Purpose (In Context)', 'navigation-mechanisms-refs', '1',),
('5', 'Multiple Ways', 'navigation-mechanisms-mult-loc', '2',),
('6', 'Headings and Labels', 'navigation-mechanisms-descriptive', '2',),
('7', 'Focus Visible', 'navigation-mechanisms-focus-visible', '2',),
('8', 'Location', 'navigation-mechanisms-location', '3',),
('9', 'Link Purpose (Link Only)', 'navigation-mechanisms-link', '3',),
('10', 'Section Headings', 'navigation-mechanisms-headings', '3',),
),
),
),
),
('3', 'Understandable - Information and the operation of user interface must be understandable.', 'understandable',
(
('1', 'Readable', 'meaning',
(
('1', 'Language of Page', 'meaning-doc-lang-id', '1',),
('2', 'Language of Parts', 'meaning-other-lang-id', '2',),
('3', 'Unusual Words', 'meaning-idioms', '3',),
('4', 'Abbreviations ', 'meaning-located', '3',),
('5', 'Reading Level', 'meaning-supplements', '3',),
('6', 'Pronunciation', 'meaning-pronunciation', '3',),
),
),
('2', 'Predictable', 'consistent-behavior',
(
('1', 'On Focus', 'consistent-behavior-receive-focus', '1',),
('2', 'On Input', 'consistent-behavior-unpredictable-change', '1',),
('3', 'Consistent Navigation', 'consistent-behavior-consistent-locations', '2',),
('4', 'Consistent Identification', 'consistent-behavior-consistent-functionality', '2',),
('5', 'Change on Request', 'consistent-behavior-no-extreme-changes-context', '3',),
),
),
('3', 'Input Assistance', 'minimize-error',
(
('1', 'Error Identification', 'minimize-error-identified', '1',),
('2', 'Labels or Instructions', 'minimize-error-cues', '1',),
('3', 'Error Suggestion', 'minimize-error-suggestions', '2',),
('4', 'Error Prevention (Legal, Financial, Data)', 'minimize-error-reversible', '2',),
('5', 'Help', 'minimize-error-context-help', '3',),
('6', 'Error Prevention (All)', 'minimize-error-reversible-all', '3',),
),
),
),
),
('4', 'Robust - Content must be robust enough that it can be interpreted reliably by a wide variety of user agents, including assistive technologies.', 'robust',
(
('1', ' Compatible', 'ensure-compat',
(
('1', 'Parsing', 'ensure-compat-parses', '1',),
('2', 'Name, Role, Value', 'ensure-compat-rsv', '1',),
),
),
)
)
)
create_wcag20( wcag20 )
| apache-2.0 | -2,793,003,747,385,330,700 | 43.377682 | 241 | 0.583462 | false |
cedriclaunay/gaffer | python/GafferImageTest/ImageNodeTest.py | 1 | 3075 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import IECore
import Gaffer
import GafferTest
import GafferImage
class ImageNodeTest( GafferTest.TestCase ) :
def testCacheThreadSafety( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 200, 200, 1.0 ) )
g = GafferImage.Grade()
g["in"].setInput( c["out"] )
g["multiply"].setValue( IECore.Color3f( 0.4, 0.5, 0.6 ) )
gradedImage = g["out"].image()
# not enough for both images - will cause cache thrashing
Gaffer.ValuePlug.setCacheMemoryLimit( 2 * g["out"].channelData( "R", IECore.V2i( 0 ) ).memoryUsage() )
images = []
exceptions = []
def grader() :
try :
images.append( g["out"].image() )
except Exception, e :
exceptions.append( e )
threads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target = grader )
threads.append( thread )
thread.start()
for thread in threads :
thread.join()
for image in images :
self.assertEqual( image, gradedImage )
for e in exceptions :
raise e
def setUp( self ) :
self.__previousCacheMemoryLimit = Gaffer.ValuePlug.getCacheMemoryLimit()
def tearDown( self ) :
Gaffer.ValuePlug.setCacheMemoryLimit( self.__previousCacheMemoryLimit )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,835,231,544,761,158,000 | 31.368421 | 104 | 0.676098 | false |
cxhernandez/osprey | osprey/cross_validators.py | 1 | 3587 | from __future__ import print_function, absolute_import, division
from .utils import num_samples
import numpy as np
class BaseCVFactory(object):
short_name = None
def load(self):
raise NotImplementedError('should be implemented in subclass')
def create(self, X, y):
raise NotImplementedError('should be implemented in subclass')
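# Each concrete factory below just stores the cross-validation parameters and
# defers constructing the scikit-learn iterator until create() is called with
# the data, because the sklearn.cross_validation classes need the number of
# samples (or the labels y) at construction time.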
class ShuffleSplitFactory(BaseCVFactory):
short_name = ['shufflesplit', 'ShuffleSplit']
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def create(self, X, y=None):
from sklearn.cross_validation import ShuffleSplit
return ShuffleSplit(num_samples(X), n_iter=self.n_iter,
test_size=self.test_size,
train_size=self.train_size,
random_state=self.random_state)
class KFoldFactory(BaseCVFactory):
short_name = ['kfold', 'KFold']
def __init__(self, n_folds=3, shuffle=False, random_state=None):
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
def create(self, X, y=None):
from sklearn.cross_validation import KFold
return KFold(num_samples(X), n_folds=self.n_folds, shuffle=self.shuffle,
random_state=self.random_state)
class LeaveOneOutFactory(BaseCVFactory):
short_name = ['loo', 'LeaveOneOut']
def __init__(self):
pass
def create(self, X, y=None):
from sklearn.cross_validation import LeaveOneOut
return LeaveOneOut(num_samples(X))
class StratifiedShuffleSplitFactory(BaseCVFactory):
short_name = ['stratifiedshufflesplit', 'StratifiedShuffleSplit']
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def create(self, X, y):
from sklearn.cross_validation import StratifiedShuffleSplit
return StratifiedShuffleSplit(y, n_iter=self.n_iter,
test_size=self.test_size,
train_size=self.train_size,
random_state=self.random_state)
class StratifiedKFoldFactory(BaseCVFactory):
short_name = ['stratifiedkfold', 'StratifiedKFold']
def __init__(self, n_folds=3, shuffle=False, random_state=None):
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
def create(self, X, y):
from sklearn.cross_validation import StratifiedKFold
return StratifiedKFold(y, n_folds=self.n_folds, shuffle=self.shuffle,
random_state=self.random_state)
class FixedCVFactory(BaseCVFactory):
"""
Cross-validator to use with a fixed, held-out validation set.
Parameters
----------
start : int
Start index of validation set.
stop : int, optional
Stop index of validation set.
"""
short_name = ['fixed', 'Fixed']
def __init__(self, start, stop=None):
self.valid = slice(start, stop)
def create(self, X, y):
indices = np.arange(num_samples(X))
valid = indices[self.valid]
train = np.setdiff1d(indices, valid)
return (train, valid), # return a nested tuple
| apache-2.0 | -7,692,136,297,481,168,000 | 29.398305 | 80 | 0.61249 | false |
cberridge/trappetroll | src/audio_player.py | 1 | 3668 | '''
Play mp3s and also call a function to move
the mouth when the mp3 starts to play
'''
import time
import os
import random
import glob
import pygame
import moves_play
class AudioPlayer(object):
'''-'''
def __init__(self, mouth_callback):
'''-'''
pygame.mixer.init()
pygame.mixer.music.set_volume(0)
# path relative to the directory in which this script runs
self.audio_directory = \
os.path.normpath(os.path.dirname(os.path.abspath(__file__))
+ '/../audio')
self.name_file = 'name_m.mp3'
self.mouth_callback = mouth_callback
self.is_killed = False
print self.audio_directory
def _play_single_mp3(self, file_name_with_path, volume=100.0):
'''-'''
self.is_killed = False
move_mouth = file_name_with_path.endswith('_m.mp3')
print "Playing... " + file_name_with_path
if pygame.mixer.music.get_busy():
pygame.mixer.stop()
pygame.mixer.music.set_volume(volume/100.0)
pygame.mixer.music.load(file_name_with_path)
pygame.mixer.music.play()
if move_mouth:
moves_play.play_moves(file_name_with_path,
self.mouth_callback,
self.is_playing,
async=False)
while self.is_playing():
time.sleep(0.01)
pygame.mixer.music.set_volume(0) # Removes hum
return not self.is_killed
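    # File-name conventions understood by play_mp3:
    #   *_m.mp3    -- play the clip while driving the mouth via moves_play
    #   directory  -- play every mp3 and subdirectory in alphabetical order
    #   *.rand     -- directory suffix: play one randomly chosen entry instead
    #   name_m.mp3 -- always played from the top-level audio directory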
def play_mp3(self, file_name, volume=100.0, has_path=False):
''' - '''
return_value = False
if has_path:
file_name_with_path = file_name
else:
file_name_with_path = self.audio_directory +'/'+ file_name
if os.path.isdir(file_name_with_path):
dir_name = file_name_with_path
# Get both directories and mp3 files in the current directory
dir_list = glob.glob(dir_name + '/*/')
# Remove the trailing slash!
dir_list = [directory[:-1] for directory in dir_list]
file_list = glob.glob(dir_name + '/*.mp3') + dir_list
file_list.sort()
if file_name_with_path.endswith('.rand'):
# play a random file in directory
random_index = int(random.random() * len(file_list))
return_value = self.play_mp3(file_list[random_index], volume,
True)
else:
# play all mp3s and directories in alphabetical order
for current_file in file_list:
if not self.play_mp3(current_file, volume, True):
return_value = False
break
elif file_name_with_path.endswith(self.name_file):
# if the file ends in name_m.mp3, don't play it, play the file
# in the top level audio directory
return_value = self._play_single_mp3(self.audio_directory + '/' +
self.name_file,
volume)
elif file_name_with_path.endswith('.mp3'):
return_value = self._play_single_mp3(file_name_with_path, volume)
else:
print 'no match: ' + file_name_with_path
return return_value
def kill_sound(self):
'''-'''
self.is_killed = True
pygame.mixer.music.stop()
pygame.mixer.music.set_volume(0)
@staticmethod
def is_playing():
'''-'''
return pygame.mixer.music.get_busy()
| gpl-2.0 | 2,995,576,482,805,124,600 | 32.345455 | 77 | 0.526445 | false |
jardiacaj/finem_imperii | world/management/commands/initialize_world.py | 1 | 1240 | import logging
from django.core.management.base import BaseCommand, CommandError
from world.initialization import initialize_world, AlreadyInitializedException
from world.models.geography import World
class Command(BaseCommand):
help = 'Initializes the specified world'
def add_arguments(self, parser):
parser.add_argument('world_id', nargs='+', type=int)
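    # Typical invocation (the command name comes from this module's file name):
    #   python manage.py initialize_world <world_id> [<world_id> ...]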
def handle(self, *args, **options):
logging.getLogger().setLevel(logging.INFO)
for world_id in options['world_id']:
try:
world = World.objects.get(pk=world_id)
except World.DoesNotExist:
raise CommandError(
'World with id {} does not exist'.format(world_id))
try:
initialize_world(world)
except AlreadyInitializedException:
raise CommandError('{} ({}) is already initialized'.format(
world,
world_id
))
self.stdout.write(
self.style.SUCCESS(
'Successfully initialized {} ({})'.format(
world,
world_id
)
)
)
| agpl-3.0 | -7,105,990,472,116,270,000 | 30 | 78 | 0.53871 | false |
ganga-devs/ganga | ganga/GangaCore/test/GPI/TestStartUp.py | 1 | 2097 |
import os
import inspect
import sys
import shutil
import glob
from tempfile import mkdtemp
# First clear away any configurations and temp files which may not be present on first launch
homeDir = os.path.expanduser("~")
if os.path.exists(os.path.join(homeDir, '.gangarc')):
os.unlink(os.path.join(homeDir, '.gangarc'))
for logFile in glob.glob(os.path.join(homeDir, '.ganga.log*')):
os.unlink(logFile)
shutil.rmtree(os.path.join(homeDir, '.ipython-ganga'), ignore_errors=True)
shutil.rmtree(os.path.join(homeDir, '.gangarc_backups'), ignore_errors=True)
def standardSetup():
"""Function to perform standard setup for GangaCore.
"""
gangaDir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), '../../ganga'))
sys.path.insert(0, gangaDir)
from GangaCore.PACKAGE import standardSetup
standardSetup()
standardSetup()
del standardSetup
this_dir = mkdtemp()
def testStartUp():
""" Lets test the startup of Ganga mimicking first launch """
# Process options given at command line and in configuration file(s)
# Perform environment setup and bootstrap
from GangaCore.Runtime import setupGanga
argv = ['ganga', '--no-mon', '-o[Configuration]gangadir=%s' % this_dir, '-o[Configuration]RUNTIME_PATH=GangaTest']
setupGanga(argv=argv, interactive=False)
for this_file in ['.gangarc', '.ganga.log']:
assert os.path.isfile(os.path.join(homeDir, this_file))
# No way known to mimic IPython starting up in a simple way
#assert os.path.isdir(os.path.join(homeDir, '.ipython-ganga'))
for this_folder in ['repository',]:
assert os.path.isdir(os.path.join(this_dir, this_folder))
from GangaCore.GPI import Job
j=Job()
j.submit()
for this_folder in ['shared', 'workspace']:
assert os.path.isdir(os.path.join(this_dir, this_folder))
def testShutdown():
""" Lets just call the shutdown here for safety """
from GangaCore.testlib.GangaUnitTest import stop_ganga
stop_ganga()
shutil.rmtree(this_dir, ignore_errors=True)
| gpl-2.0 | 2,743,772,439,762,664,000 | 29.838235 | 134 | 0.702909 | false |
madscatt/zazzie_1.5 | trunk/sassie/analyze/apbs/apbs.py | 1 | 12389 | '''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import locale
import string
import time
import subprocess
from write_apbs_input import *
import sassie.sasmol.sasmol as sasmol
import sassie.sasconfig as sasconfig
# APBS
#
# 12/05/2004 -- initial coding : jc
# 01/02/2011 -- added sasmol support : jc
# 08/26/2011 -- adapted for mdx : jc
# 06/16/2012 -- adapted for namd v. 2.9 : jc
# 09/10/2012 -- adapted for apbs : jc
#
# LC 1 2 3 4 5 6 7
# LC4567890123456789012345678901234567890123456789012345678901234567890123456789
# * **
'''
APBS is the module that contains the functions
that are used to run a series of electrostatic calculations
on a set of structures in a supplied pdb/dcd file.
This module is called from APBS in the main
GUI through the graphical_apbs.py script.
REFERENCE:
Baker NA, Sept D, Joseph S, Holst MJ, McCammon JA. Electrostatics of
nanosystems: application to microtubules and the ribosome.
Proc. Natl. Acad. Sci. USA 98, 10037-10041 2001.
M. Holst and F. Saied, Multigrid solution of the Poisson-Boltzmann equation.
J. Comput. Chem. 14, 105-113, 1993.
M. Holst and F. Saied, Numerical solution of the nonlinear Poisson-Boltzmann
equation: Developing more robust and efficient methods.
J. Comput. Chem. 16, 337-364, 1995.
M. Holst, Adaptive numerical treatment of elliptic systems on manifolds.
Advances in Computational Mathematics 15, 139-191, 2001.
R. Bank and M. Holst, A New Paradigm for Parallel Adaptive Meshing Algorithms.
SIAM Review 45, 291-323, 2003.
'''
def unpack_variables(variables):
runname = variables['runname'][0]
infile = variables['infile'][0]
pdbfile = variables['pdbfile'][0]
outfile = variables['outfile'][0]
temperature = variables['temperature'][0]
ph = variables['ph'][0]
ion_charge = variables['ion_charge'][0]
ion_conc = variables['ion_conc'][0]
ion_radius = variables['ion_radius'][0]
manual_flag = variables['manual_flag'][0]
manual_file = variables['manual_file'][0]
#energyfile = variables['energyfile'][0]
#keepout = variables['keepout'][0]
return runname, infile, pdbfile, outfile, temperature, ph, ion_charge, ion_conc, ion_radius, manual_flag, manual_file
def print_failure(message, txtOutput):
txtOutput.put("\n\n>>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n")
txtOutput.put(">>>> RUN FAILURE <<<<\n\n")
txtOutput.put(message)
return
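# HSD, HSE and HSP are the CHARMM names for the histidine protonation states;
# rename_his maps them back to the standard residue name HIS before the
# structure is written out for pdb2pqr/APBS.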
def rename_his(m1):
natoms = m1.natoms()
resname = m1.resname()
new_resname = []
for i in xrange(natoms):
this_resname = resname[i]
if(this_resname == 'HSE' or this_resname == 'HSD' or this_resname == 'HSP'):
new_resname.append('HIS')
else:
new_resname.append(this_resname)
m1.setResname(new_resname)
return
def apbs_driver(variables, txtOutput):
'''
    APBS_DRIVER reads in variables from the GUI input and runs a series of
    APBS calculations on a set of structures in a supplied pdb/dcd file.
INPUT: variable descriptions:
runname: run_name
infile: input pdb or dcd filename
pdbfile: input pdb file (reference)
temperature: temperature of simulation
OUTPUT:
txtOutput: TK handler for output to GUI textbox
files stored in ~/run_name/apbs directory:
outfile: output filename
'''
runname, infile, pdbfile, outfile, temperature, ph, ion_charge, ion_conc, ion_radius, manual_flag, manual_file = unpack_variables(
variables)
keepout = 1
dcdfreq = 1
path = runname + '/apbs/'
print 'path = ', path
print 'infile = ', infile
vers = 'version 0.1 : 09/10/12 : jc'
direxist = os.path.exists(path)
if(direxist == 0):
try:
result = os.system('mkdir -p ' + path)
except:
message = 'can not create project directory: ' + path
message += '\nstopping here\n'
print_failure(message, txtOutput)
if(result != 0):
message = 'can not create project directory: ' + path
message += '\nstopping here\n'
print_failure(message, txtOutput)
m1 = sasmol.SasMol(0)
m2 = sasmol.SasMol(0)
m1.read_pdb(pdbfile)
m2.read_pdb(pdbfile, fastread=True)
rename_his(m1)
rename_his(m2)
try:
if(infile[-3:] == 'dcd'):
infiletype = 'dcd'
elif(infile[-3:] == 'pdb'):
            infiletype = 'pdb'
        else:
            # unsupported extension: report it via the failure message below
            raise ValueError(infile)
except:
message = 'input filename is a PDB or DCD file but it must end with ".pdb" or ".dcd" '
message += ' : stopping here'
print_failure(message, txtOutput)
print 'infiletype = ', infiletype
if(infiletype == 'dcd'):
min_max = m2.calc_minmax_all_steps(infile)
dcdfile = m1.open_dcd_read(infile)
nf = dcdfile[2]
else:
m1.read_pdb(infile)
nf = m1.coor()[:, 0, 0].shape[0]
min_max = m2.calc_minmax_all_steps(infile, pdb='pdb')
print 'number of frames = ', nf
print 'min_max = ', min_max
maximum_dimensions = [min_max[1][0] - min_max[0][0],
min_max[1][1] - min_max[0][1], min_max[1][2] - min_max[0][2]]
print 'min_max = ', min_max
print 'maximum_dimensions = ', maximum_dimensions
# ttxt=time.ctime()
ttxt = time.asctime(time.gmtime(time.time()))
st = ''.join(['=' for x in xrange(60)])
txtOutput.put("\n%s \n" % (st))
txtOutput.put("DATA FROM RUN: %s \n\n" % (ttxt))
final_energy = []
coorlist = []
for i in range(nf):
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'apbs calculation for frame ', i + 1, ' of ', nf
print 'writing temporary PDB file'
if(infiletype == 'dcd'):
m1.read_dcd_step(dcdfile, i)
m1.write_pdb(path + 'junk.pdb', 0, 'w')
else:
m1.write_pdb(path + 'junk.pdb', i, 'w')
print 'writing temporary APBS input file'
if(i < 9):
istr = '0000' + str(i + 1)
elif(i < 99):
istr = '000' + str(i + 1)
elif(i < 999):
istr = '00' + str(i + 1)
elif(i < 9999):
istr = '0' + str(i + 1)
elif(i < 99999):
istr = str(i + 1)
else:
print 'wow, man!'
istr = str(i + 1)
thisdcd = path + 'min_' + istr + '.dcd'
if(manual_flag == 0):
inputfilename = 'junk.in'
write_apbs_input(maximum_dimensions, temperature,
inputfilename, ion_charge, ion_conc, ion_radius)
else:
inputfilename = manual_file
print 'starting apbs calculation ( nfiles = ', nf, ')'
ttime = time.ctime()
runstring = vers + ' : ' + outfile + ' run stated at : ' + ttime
print runstring
ncpu = 1
bin_path = sasconfig._bin_path
if(ncpu == 1):
print 'starting pdb2pqr calculation number: ', istr
#run_pdb2pqr = 'python /usr/local/bin/pdb2pqr/pdb2pqr.py --ff=charmm --with-ph='+str(ph)+' -v '+path+'junk.pdb junk.pqr >& pdb2pqr.out'
run_pdb2pqr = 'python ' + bin_path + 'pdb2pqr.py --ff=charmm --with-ph=' + \
str(ph) + ' -v ' + path + 'junk.pdb junk.pqr >& pdb2pqr.out'
os.system(run_pdb2pqr)
print 'starting apbs calculation number: ', istr
#nst='/usr/local/bin/apbs junk.in >& junk.out &'
nst = bin_path + '/apbs junk.in >& junk.out &'
p = subprocess.Popen(nst, shell=True, executable='/bin/bash')
sts = os.waitpid(p.pid, 0)[1]
print 'p.pid = ', p.pid
thisjob = str(int(p.pid) + 1)
run = 1
esteps = 0
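            # poll junk.out until APBS prints its closing "Thanks for using"
            # banner, which marks the end of the electrostatics run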
while(run == 1):
# time.sleep(5)
lsst = 'ls junk.out | grep -c "junk.out" '
lsfile = os.popen(lsst, 'r').readlines()
stls = string.split(lsfile[0])
nstls = locale.atoi(stls[0])
if(nstls > 0):
tout2 = os.popen(
'tail -15 junk.out | grep "Thanks for using"', 'r').readlines()
if(len(tout2) > 0):
print 'finished apbs calculation'
run = 0
fraction_done = (float(i + 1) / float(nf))
progress_string = 'COMPLETED ' + \
str(i + 1) + ' of ' + str(nf) + ' : ' + \
str(fraction_done * 100.0) + ' % done'
print('%s\n' % progress_string)
print('%s\n' % progress_string)
report_string = 'STATUS\t' + str(fraction_done)
txtOutput.put(report_string)
print 'finished run'
mvst = 'mv io.mc ' + path + 'apbs_' + istr + '_io.mc'
os.system(mvst)
mvst = 'mv pot.dx ' + path + 'apbs_' + istr + '_pot.dx.mc'
os.system(mvst)
mvst = 'mv pdb2pqr.out ' + path + 'apbs_' + istr + '_pdb2pqr.dat'
os.system(mvst)
mvst = 'mv ' + path + 'junk.pdb ' + path + 'apbs_' + istr + '.pdb'
os.system(mvst)
mvst = 'mv junk.out ' + path + 'apbs_' + istr + '.out'
os.system(mvst)
mvst = 'mv junk.pqr ' + path + 'apbs_' + istr + '.pqr'
os.system(mvst)
mvst = 'mv junk.propka ' + path + 'apbs_' + istr + '.propka'
os.system(mvst)
# mvst = 'mv junk-input.p '+path+'apbs_input.p.'+istr+'.pqr'
# os.system(mvst)
mvst = 'mv junk.in ' + path + 'apbs_' + istr + '.in'
os.system(mvst)
#os.system('mv energy_results.out '+path+'energy_results_'+istr+'.out')
if(infiletype == 'dcd'):
m1.close_dcd_read(dcdfile[0])
txtOutput.put("Total number of frames = %d\n\n" % (nf))
txtOutput.put("output energies saved to : %s\n" % ('./' + path))
txtOutput.put("\n%s \n" % (st))
time.sleep(0.5)
print 'APBS IS DONE'
return()
if __name__ == '__main__':
# BEGIN USER EDIT
# BEGIN USER EDIT
# BEGIN USER EDIT
runname = 'run_0'
pdbfile = 'ten_mer.pdb'
infile = 'ten_mer_two_frames.dcd'
outfile = 'apbs.dat'
ph = '5.5'
temperature = '300.0'
ion_conc = '0.15'
ion_charge = '1.0'
ion_radius = '1.62'
manual_flag = '0'
manual_file = 'test_input_file.txt'
# END USER EDIT
# END USER EDIT
# END USER EDIT
svariables = {}
svariables['runname'] = (runname, 'string')
svariables['infile'] = (infile, 'string')
svariables['pdbfile'] = (pdbfile, 'string')
svariables['outfile'] = (outfile, 'string')
svariables['ph'] = (ph, 'float')
svariables['temperature'] = (temperature, 'float')
svariables['ion_charge'] = (ion_charge, 'float')
svariables['ion_conc'] = (ion_conc, 'float')
svariables['ion_radius'] = (ion_radius, 'float')
svariables['manual_flag'] = (manual_flag,'int' )
svariables['manual_file'] = (manual_file, 'string')
import sassie.interface.input_filter as input_filter
error, variables = input_filter.type_check_and_convert(svariables)
if(len(error) > 0):
print 'error = ', error
sys.exit()
runname = variables['runname'][0]
import multiprocessing
import shutil
import os
if os.path.exists(runname + '/apbs'):
shutil.rmtree(runname + '/apbs')
txtQueue = multiprocessing.JoinableQueue()
apbs_driver(variables, txtQueue)
| gpl-3.0 | -7,442,169,570,840,521,000 | 31.688654 | 147 | 0.566309 | false |
wurstmineberg/systemd-minecraft | minecraft/__init__.py | 1 | 39178 | #!/usr/bin/env python3
"""Systemd init script for one or more vanilla Minecraft servers.
Usage:
minecraft [options] (start | stop | kill | restart | status | backup) [<world>...]
minecraft [options] (update | revert) [<world> [snapshot <snapshot-id> | <version>]]
minecraft [options] saves (on | off) [<world>...]
minecraft [options] update-all [snapshot <snapshot-id> | <version>]
minecraft [options] command <world> [--] <command>...
minecraft -h | --help
minecraft --version
Options:
-h, --help Print this message and exit.
--all Apply the action to all configured worlds.
--config=<config> Path to the config file [default: /opt/wurstmineberg/config/systemd-minecraft.json].
--enabled Apply the action to all enabled worlds. This option is intended to be used only by the service file, to automatically start all enabled worlds on boot.
--main Apply the action to the main world. This is the default.
--no-backup Don't back up the world(s) before updating/reverting.
--version Print version info and exit.
"""
import sys
sys.path.append('/opt/py')
import contextlib
import datetime
import docopt
import errno
import gzip
import json
import loops
import mcrcon
import more_itertools
import os
import signal
import os.path
import pathlib
import pwd
import re
import requests
import shutil
import socket
import subprocess
import threading
import time
import urllib.parse
try:
from minecraft.version import __version__
except ImportError:
__version__ = None
from wmb import get_config, from_assets
CONFIG = get_config("systemd-minecraft", base = from_assets(__file__))
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='Minecraft init script {}'.format(__version__))
for key in CONFIG['paths']:
if isinstance(CONFIG['paths'][key], str):
CONFIG['paths'][key] = pathlib.Path(CONFIG['paths'][key])
class World:
def __init__(self, name=None):
if name is None:
name = CONFIG['mainWorld']
if name in CONFIG['worlds']:
self.name = name
else:
raise ValueError('no such world')
def __repr__(self):
return 'minecraft.World({!r})'.format(self.name)
def __str__(self):
return self.name
def backup(self, announce=False, reply=print, path=None, *, copy_to_latest=None):
"""Back up the Minecraft world.
Optional arguments:
announce -- Whether to announce in-game that saves are being disabled/reenabled. Defaults to False.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
path -- Where the backup will be saved. The file extension .tar.gz will be appended automatically. Defaults to a file with the world name and a timestamp in the backups directory.
Keyword-only arguments:
copy_to_latest -- Whether to create or update the copy of the world directory at backups/latest. Defaults to True for the main world and to False for all other worlds.
Returns:
A pathlib.Path representing the gzipped backup tarball.
"""
if copy_to_latest is None:
copy_to_latest = self.is_main
self.save_off(announce=announce, reply=reply)
if path is None:
path = str(self.backup_path / '{}_{:%Y-%m-%d_%Hh%M}'.format(self.name, datetime.datetime.utcnow()))
else:
path = str(path)
backup_file = pathlib.Path(path + '.tar')
reply('Backing up minecraft world...')
if not backup_file.parent.exists():
# make sure the backup directory exists
backup_file.parent.mkdir(parents=True)
subprocess.call(['tar', '-C', str(self.path), '-cf', str(backup_file), self.world_path.name]) # tar the world directory (e.g. /opt/wurstmineberg/world/wurstmineberg/world or /opt/wurstmineberg/world/wurstmineberg/wurstmineberg)
if copy_to_latest:
# make a copy of the world directory for the main world to be used by map rendering
subprocess.call(['rsync', '-av', '--delete', str(self.world_path) + '/', str(self.backup_path / 'latest')])
self.save_on(announce=announce, reply=reply)
reply('Compressing backup...')
subprocess.call(['gzip', '-f', str(backup_file)])
backup_file = pathlib.Path(str(backup_file) + '.gz')
if self.is_main and CONFIG['paths']['backupWeb'] is not None:
reply('Symlinking to httpdocs...')
if CONFIG['paths']['backupWeb'].is_symlink():
CONFIG['paths']['backupWeb'].unlink()
CONFIG['paths']['backupWeb'].symlink_to(backup_file)
reply('Done.')
return backup_file
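    # e.g. World().backup() tars the main world into
    # backups/<world>/<world>_<timestamp>.tar.gz and refreshes
    # backups/<world>/latest for map rendering.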
@property
def backup_path(self):
return CONFIG['paths']['backup'] / self.name
def command(self, cmd, args=[], block=False):
"""Send a command to the server.
Required arguments:
cmd -- The command name.
Optional arguments:
args -- A list of arguments passed to the command.
block -- If True and the server is not running, tries to wait until the server is running to send the command. Defaults to False.
Raises:
MinecraftServerNotRunningError -- If the world is not running and block is set to False.
socket.error -- If the world is running but the RCON connection failed.
"""
while not self.status():
if block:
time.sleep(1)
else:
raise MinecraftServerNotRunningError('')
cmd += (' ' + ' '.join(str(arg) for arg in args)) if len(args) else ''
rcon = mcrcon.MCRcon()
rcon.connect('localhost', self.config['rconPort'], self.config['rconPassword'])
return rcon.command(cmd)
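    # Illustrative use (world name and command are examples only):
    #   World('myworld').command('say', ['Hello']) sends "say Hello" over RCON
    #   and returns the server's reply as a string.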
def cleanup(self, reply=print):
if self.pidfile_path.exists():
reply("Removing PID file...")
self.pidfile_path.unlink()
if self.socket_path.exists():
reply("Removing socket file...")
self.socket_path.unlink()
@property
def config(self):
ret = {
'customServer': CONFIG['worlds'][self.name].get('customServer', False),
'enabled': CONFIG['worlds'][self.name].get('enabled', False),
'javaOptions': CONFIG['javaOptions'].copy(),
'rconPassword': CONFIG['worlds'][self.name].get('rconPassword'),
'rconPort': CONFIG['worlds'][self.name].get('rconPort', 25575),
'whitelist': CONFIG['whitelist'].copy()
}
ret['javaOptions'].update(CONFIG['worlds'][self.name].get('javaOptions', {}))
ret['whitelist'].update(CONFIG['worlds'][self.name].get('whitelist', {}))
return ret
@property
def is_main(self):
return self.name == CONFIG['mainWorld']
def iter_update(self, version=None, snapshot=False, *, reply=print, log_path=None, make_backup=True, override=None):
"""Download a different version of Minecraft and restart the world if it is running. Returns a generator where each iteration performs one step of the update process.
Optional arguments:
version -- If given, a version with this name will be downloaded. By default, the newest available version is downloaded.
snapshot -- If version is given, this specifies whether the version is a development version. If no version is given, this specifies whether the newest stable version or the newest development version should be downloaded. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop and start functions if the server is stopped before the update.
make_backup -- Whether to back up the world before updating. Defaults to True.
override -- If this is true and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to True if the target version is the current version, False otherwise.
reply -- This function is called several times with a string argument representing update progress. Defaults to the built-in print function.
"""
# get version
versions_json = requests.get('https://launchermeta.mojang.com/mc/game/version_manifest.json').json()
if version is None: # try to dynamically get the latest version number from assets
version = versions_json['latest']['snapshot' if snapshot else 'release']
        elif snapshot and len(version) == 1:
version = datetime.datetime.utcnow().strftime('%yw%V') + version
for version_dict in versions_json['versions']:
if version_dict.get('id') == version:
snapshot = version_dict.get('type') == 'snapshot'
break
else:
reply('Minecraft version not found in assets, will try downloading anyway')
version_dict = None
version_text = 'Minecraft {} {}'.format('snapshot' if snapshot else 'version', version)
yield {
'version': version,
'is_snapshot': snapshot,
'version_text': version_text
}
old_version = self.version()
if override is None:
override = version == old_version
if version_dict is not None and 'url' in version_dict:
version_json = requests.get(version_dict['url']).json()
else:
version_json = None
# back up world in background
if make_backup:
backup_path = self.backup_path / 'pre-update' / '{}_{:%Y-%m-%d_%Hh%M}_{}_{}'.format(self.name, datetime.datetime.utcnow(), old_version, version)
backup_thread = threading.Thread(target=self.backup, kwargs={'reply': reply, 'path': backup_path})
backup_thread.start()
# get server jar
jar_path = CONFIG['paths']['jar'] / 'minecraft_server.{}.jar'.format(version)
if override and jar_path.exists():
jar_path.unlink()
if not jar_path.exists():
_download('https://s3.amazonaws.com/Minecraft.Download/versions/{0}/minecraft_server.{0}.jar'.format(version), local_filename=str(jar_path))
# get client jar
if 'clientVersions' in CONFIG['paths']:
with contextlib.suppress(FileExistsError):
(CONFIG['paths']['clientVersions'] / version).mkdir(parents=True)
_download('https://s3.amazonaws.com/Minecraft.Download/versions/{0}/{0}.jar'.format(version) if version_json is None else version_json['downloads']['client']['url'], local_filename=str(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version)))
# wait for backup to finish
if make_backup:
yield 'Download finished. Waiting for backup to finish...'
backup_thread.join()
yield 'Backup finished. Stopping server...'
else:
yield 'Download finished. Stopping server...'
# stop server
was_running = self.status()
if was_running:
self.say('Server will be upgrading to ' + version_text + ' and therefore restart')
time.sleep(5)
self.stop(reply=reply, log_path=log_path)
yield 'Server stopped. Installing new server...'
# install new server
if self.service_path.exists():
self.service_path.unlink()
self.service_path.symlink_to(CONFIG['paths']['jar'] / 'minecraft_server.{}.jar'.format(version))
client_jar_path = CONFIG['paths']['home'] / 'home' / 'client.jar'
# update Mapcrafter textures
if self.is_main:
if client_jar_path.exists():
client_jar_path.unlink()
client_jar_path.symlink_to(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version))
if CONFIG['updateMapcrafterTextures']:
try:
subprocess.check_call(['mapcrafter_textures.py', str(CONFIG['paths']['clientVersions'] / version / '{}.jar'.format(version)), '/usr/local/share/mapcrafter/textures'])
except Exception as e:
reply('Error while updating mapcrafter textures: {}'.format(e))
# restart server
if was_running:
self.start(reply=reply, start_message='Server updated. Restarting...', log_path=log_path)
def kill(self, reply=print):
"""Kills a non responding minecraft server using the PID saved in the PID file."""
with self.pidfile_path.open("r") as pidfile:
pid = int(pidfile.read())
reply("World '" + self.name + "': Sending SIGTERM to PID " + str(pid) + " and waiting 60 seconds for shutdown...")
try:
os.kill(pid, signal.SIGTERM)
for _ in range(60):
live = self.pidrunning(pid)
if not live:
reply("Terminated world '" + self.name + "'")
break
time.sleep(1)
else:
reply("Could not terminate with SIGQUIT. Sending SIGKILL to PID " + str(pid) + "...")
os.kill(pid, signal.SIGKILL)
except ProcessLookupError:
reply("Process does not exist. Cleaning up...")
finally:
self.cleanup(reply)
return not self.status()
@property
def path(self):
return CONFIG['paths']['worlds'] / self.name
@property
def pid(self):
try:
with self.pidfile_path.open("r") as pidfile:
return int(pidfile.read())
except FileNotFoundError:
return None
def pidrunning(self, pid):
try:
os.kill(pid, 0)
return True
except ProcessLookupError:
return False
except PermissionError:
# Process exists but you can't send signals
return True
def pidstatus(self, reply=print):
if self.pidfile_path.exists() and self.pid is not None:
if self.pidrunning(self.pid):
return True
elif self.pidfile_path.exists():
reply("PID file exists but process is terminated. Cleaning up...")
self.cleanup(reply)
return False
@property
def pidfile_path(self):
return CONFIG['paths']['pidfiles'] / (self.name + ".pid")
def restart(self, *args, **kwargs):
reply = kwargs.get('reply', print)
if not self.stop(*args, **kwargs):
return False
kwargs['start_message'] = kwargs.get('start_message', 'Server stopped. Restarting...')
return self.start(*args, **kwargs)
def revert(self, path_or_version=None, snapshot=False, *, log_path=None, make_backup=True, override=False, reply=print):
"""Revert to a different version of Minecraft and restore a pre-update backup.
Optional arguments:
path_or_version -- If given, a pathlib.Path pointing at the backup file to be restored, or the Minecraft version to which to restore. By default, the newest available pre-update backup is restored.
snapshot -- If true, single-letter Minecraft versions will be expanded to include the current year and week number. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop function if the server is stopped before the revert.
make_backup -- Whether to back up the world before reverting. Defaults to True.
override -- If this is True and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to False.
reply -- This function is called several times with a string argument representing revert progress. Defaults to the built-in print function.
"""
# determine version and backup path
if path_or_version is None:
path = sorted((self.backup_path / 'pre-update').iterdir(), key=lambda path: path.stat().st_mtime, reverse=True)[0] # latest pre-update backup
version = path.name.split('_')[3]
elif isinstance(path_or_version, pathlib.Path):
path = path_or_version
version = path.name.split('_')[3]
else:
version = path_or_version
if snapshot and len(version) == 1:
version = datetime.datetime.utcnow().strftime('%yw%V') + version
path = next(path for path in sorted((self.backup_path / 'pre-update').iterdir(), key=lambda path: path.stat().st_mtime, reverse=True) if path.name.split('_')[3] == version)
# start iter_update
update_iterator = self.iter_update(version, log_path=log_path, make_backup=False, override=override, reply=reply)
version_dict = next(update_iterator)
reply('Downloading ' + version_dict['version_text'])
# make a backup to backup/<world>/reverted
if make_backup:
old_version = self.version()
backup_path = self.backup_path / 'reverted' / '{}_{:%Y-%m-%d_%Hh%M}_{}_{}'.format(self.name, datetime.datetime.utcnow(), old_version, version)
self.backup(reply=reply, path=backup_path, copy_to_latest=False)
# stop the server
was_running = self.status()
if was_running:
self.say('Server will be reverting to ' + version_dict["version_text"] + ' and therefore restart')
time.sleep(5)
self.stop(reply=reply, log_path=log_path)
reply('Server stopped. Restoring backup...')
# revert Minecraft version
for message in update_iterator:
reply(message)
# restore backup
world_path = self.world_path
if world_path.exists():
shutil.rmtree(str(world_path))
        subprocess.call(['tar', '-C', str(self.path), '-xzf', str(path), world_path.name]) # untar the world backup
# restart server
if was_running:
self.start(reply=reply, start_message='Server reverted. Restarting...', log_path=log_path)
return version_dict['version'], version_dict['is_snapshot'], version_dict['version_text']
def save_off(self, announce=True, reply=print):
"""Turn off automatic world saves, then force-save once.
Optional arguments:
announce -- Whether to announce in-game that saves are being disabled.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
"""
if self.status():
reply('Minecraft is running... suspending saves')
if announce:
self.say('Server backup starting. Server going readonly...')
self.command('save-off')
self.command('save-all')
time.sleep(10)
os.sync()
else:
reply('Minecraft is not running. Not suspending saves.')
def save_on(self, announce=True, reply=print):
"""Enable automatic world saves.
Optional arguments:
announce -- Whether to announce in-game that saves are being enabled.
reply -- This function is called with human-readable progress updates. Defaults to the built-in print function.
"""
if self.status():
reply('Minecraft is running... re-enabling saves')
self.command('save-on')
if announce:
self.say('Server backup ended. Server going readwrite...')
else:
reply('Minecraft is not running. Not resuming saves.')
def say(self, message, prefix=True):
"""Broadcast a message in the world's in-game chat. This is a simple wrapper around the /say and /tellraw commands.
Required arguments:
message -- The message to display in chat.
Optional arguments:
prefix -- If False, uses /tellraw instead of /say to send a message without the [server] prefix. Defaults to True.
"""
if prefix:
self.command('say', [message])
else:
self.tellraw(message)
@property
def service_path(self):
return self.path / CONFIG['paths']['service']
@property
def socket_path(self):
return CONFIG['paths']['sockets'] / self.name
def start(self, *args, **kwargs):
def feed_commands(java_popen):
"""This function will run a loop to feed commands sent through the socket to minecraft"""
mypid = os.getpid()
loop_var = True
with socket.socket(socket.AF_UNIX) as s:
# Set 1 minute timeout so that the process actually exits (this is not crucial but we don't want to spam the system)
s.settimeout(60)
if self.socket_path.exists():
self.socket_path.unlink()
s.bind(str(self.socket_path))
while loop_var and self.socket_path.exists():
if not self.pidrunning(java_popen.pid):
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except:
pass
return
str_buffer = ''
try:
s.listen(1)
c, _ = s.accept()
while loop_var:
data = c.recv(4096)
if not data:
break
lines = (str_buffer + data.decode('utf-8')).split('\n')
for line in lines[:-1]:
if line == 'stop':
loop_var = False
break
java_popen.stdin.write(line.encode('utf-8') + b'\n')
java_popen.stdin.flush()
str_buffer = lines[-1]
try:
c.shutdown(socket.SHUT_RDWR)
c.close()
except:
pass
except (socket.timeout, socket.error):
continue
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except:
pass
java_popen.communicate(input=b'stop\n')
if self.socket_path.exists():
self.socket_path.unlink()
invocation = [
'java',
'-Xmx' + str(self.config['javaOptions']['maxHeap']) + 'M',
'-Xms' + str(self.config['javaOptions']['minHeap']) + 'M',
'-XX:+UseConcMarkSweepGC',
'-XX:ParallelGCThreads=' + str(self.config['javaOptions']['cpuCount']),
'-XX:+AggressiveOpts',
'-Dlog4j.configurationFile=' + str(CONFIG['paths']['logConfig']),
'-jar',
str(CONFIG['paths']['service'])
] + self.config['javaOptions']['jarOptions']
reply = kwargs.get('reply', print)
if self.status():
reply('Server is already running!')
return False
reply(kwargs.get('start_message', 'Starting Minecraft server...'))
if not self.socket_path.parent.exists():
# make sure the command sockets directory exists
self.socket_path.parent.mkdir(parents=True)
if not self.pidfile_path.parent.exists():
# make sure the pidfile directory exists
self.pidfile_path.parent.mkdir(parents=True)
java_popen = subprocess.Popen(invocation, stdin=subprocess.PIPE, stdout=subprocess.PIPE, cwd=str(self.path)) # start the java process
with self.pidfile_path.open("w+") as pidfile:
pidfile.write(str(java_popen.pid))
for line in loops.timeout_total(java_popen.stdout, datetime.timedelta(seconds=CONFIG['startTimeout'])): # wait until the timeout has been exceeded...
if re.match('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} \\[Server thread/INFO\\]: Done \\([0-9]+.[0-9]+s\\)!', line.decode('utf-8')): # ...or the server has finished starting
break
_fork(feed_commands, java_popen) # feed commands from the socket to java
_fork(more_itertools.consume, java_popen.stdout) # consume java stdout to prevent deadlocking
if kwargs.get('log_path'):
with (kwargs['log_path'].open('a') if hasattr(kwargs['log_path'], 'open') else open(kwargs['log_path'], 'a')) as logins_log:
ver = self.version()
print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + (' @restart' if ver is None else ' @start ' + ver), file=logins_log) # logs in UTC
# Wait for the socket listener to spin up
for _ in range(20):
if not self.status():
time.sleep(0.5)
else:
break
return self.status()
def status(self, reply=print):
return self.pidstatus(reply=reply) and self.socket_path.exists()
def stop(self, *args, **kwargs):
reply = kwargs.get('reply', print)
if self.status():
try:
reply('SERVER SHUTTING DOWN IN 10 SECONDS. Saving map...')
notice = kwargs.get('notice', 'SERVER SHUTTING DOWN IN 10 SECONDS. Saving map...')
if self.config['rconPassword'] is None:
reply('Cannot communicate with the world, missing RCON password! Killing...')
return self.kill()
if notice is not None:
self.say(str(notice))
self.command('save-all')
time.sleep(10)
self.command('stop')
time.sleep(7)
for _ in range(12):
if self.status():
time.sleep(5)
continue
else:
break
else:
reply('The server could not be stopped! Killing...')
return self.kill()
if kwargs.get('log_path'):
with (kwargs['log_path'].open('a') if hasattr(kwargs['log_path'], 'open') else open(kwargs['log_path'], 'a')) as logins_log:
print(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + ' @stop', file=logins_log) # logs in UTC
except ConnectionRefusedError:
reply("Can't communicate with the socket. We need to kill the server...")
return self.kill()
else:
reply('Minecraft server was not running.')
self.cleanup(reply=reply)
return not self.status()
def tellraw(self, message_dict, player='@a'):
if isinstance(message_dict, str):
message_dict = {'text': message_dict}
elif isinstance(message_dict, list):
message_dict = {'text': '', 'extra': message_dict}
try:
import api.util2
except ImportError:
pass # no support for Player objects
else:
if isinstance(player, api.util2.Player):
player = player.data['minecraft']['nicks'][-1]
self.command('tellraw', [player, json.dumps(message_dict)])
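    # Example (hypothetical message): tellraw accepts a plain string, a chat-component dict or a
    # list of components, which is serialized with json.dumps and passed to the /tellraw command.
    #
    #     World().tellraw({'text': 'Backup finished', 'color': 'green'}, player='@a')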
def update(self, version=None, snapshot=False, *, log_path=None, make_backup=True, override=False, reply=print):
"""Download a different version of Minecraft and restart the server if it is running.
Optional arguments:
version -- If given, a version with this name will be downloaded. By default, the newest available version is downloaded.
snapshot -- If version is given, this specifies whether the version is a development version. If no version is given, this specifies whether the newest stable version or the newest development version should be downloaded. Defaults to False.
Keyword-only arguments:
log_path -- This is passed to the stop function if the server is stopped before the update.
make_backup -- Whether to back up the world before updating. Defaults to True.
override -- If this is True and the server jar for the target version already exists, it will be deleted and redownloaded. Defaults to False.
reply -- This function is called several times with a string argument representing update progress. Defaults to the built-in print function.
Returns:
The new version, a boolean indicating whether or not the new version is a snapshot (or pre-release), and the full name of the new version.
Raises:
NotImplementedError -- For worlds with custom servers.
"""
if self.config['customServer']:
raise NotImplementedError('Update is not implemented for worlds with custom servers')
update_iterator = self.iter_update(version=version, snapshot=snapshot, log_path=log_path, make_backup=make_backup, override=override, reply=reply)
version_dict = next(update_iterator)
reply('Downloading ' + version_dict['version_text'])
for message in update_iterator:
reply(message)
return version_dict['version'], version_dict['is_snapshot'], version_dict['version_text']
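    # Example (hypothetical version id): updating the main world to a specific release and
    # unpacking the returned tuple.
    #
    #     version, is_snapshot, version_text = World().update('1.8.7')
    #     # ('1.8.7', False, 'Minecraft version 1.8.7')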
def update_whitelist(self, people_file=None):
# get wanted whitelist from people file
        if people_file is None:
            import people as people_module  # aliased local import (assumes the 'people' module used by the original call) so the local 'people' variables below do not shadow it
            people = people_module.get_people_db().obj_dump(version=3)
else:
with open(str(people_file)) as people_fobj:
people = json.load(people_fobj)['people']
whitelist = []
additional = self.config['whitelist']['additional']
if not self.config['whitelist']['ignorePeople']:
for person in people:
if not ('minecraft' in person or 'minecraftUUID' in person):
continue
if person.get('status', 'later') not in ['founding', 'later', 'postfreeze']:
continue
if person.get('minecraftUUID'):
uuid = person['minecraftUUID'] if isinstance(person['minecraftUUID'], str) else format(person['minecraftUUID'], 'x')
if 'minecraft' in person:
name = person['minecraft']
else:
name = requests.get('https://api.mojang.com/user/profiles/{}/names'.format(uuid)).json()[-1]['name']
else:
response_json = requests.get('https://api.mojang.com/users/profiles/minecraft/{}'.format(person['minecraft'])).json()
uuid = response_json['id']
name = response_json['name']
if '-' not in uuid:
uuid = uuid[:8] + '-' + uuid[8:12] + '-' + uuid[12:16] + '-' + uuid[16:20] + '-' + uuid[20:]
whitelist.append({
'name': name,
'uuid': uuid
})
# write whitelist
whitelist_path = self.path / 'whitelist.json'
with whitelist_path.open('a'):
os.utime(str(whitelist_path), None) # touch the file
with whitelist_path.open('w') as whitelist_json:
json.dump(whitelist, whitelist_json, sort_keys=True, indent=4, separators=(',', ': '))
# apply changes to whitelist files
self.command('whitelist', ['reload'])
# add people with unknown UUIDs to new whitelist using the command
for name in additional:
self.command('whitelist', ['add', name])
# update people file
try:
import lazyjson
except ImportError:
return
try:
with whitelist_path.open() as whitelist_json:
whitelist = json.load(whitelist_json)
except ValueError:
return
people = lazyjson.File(CONFIG['paths']['people'])
for whitelist_entry in whitelist:
for person in people['people']:
if person.get('minecraftUUID') == whitelist_entry['uuid']:
if 'minecraft' in person and person.get('minecraft') != whitelist_entry['name'] and person.get('minecraft') not in person.get('minecraft_previous', []):
if 'minecraft_previous' in person:
person['minecraft_previous'].append(person['minecraft'])
else:
person['minecraft_previous'] = [person['minecraft']]
person['minecraft'] = whitelist_entry['name']
elif person.get('minecraft') == whitelist_entry['name'] and 'minecraftUUID' not in person:
person['minecraftUUID'] = whitelist_entry['uuid']
def version(self):
"""Returns the version of Minecraft the world is currently configured to run. For worlds with custom servers, returns None instead.
"""
if self.config['customServer']:
return None
return self.service_path.resolve().stem[len('minecraft_server.'):]
@property
def world_path(self):
"""Returns the world save directory"""
result = self.path / 'world'
if not result.exists():
return self.path / self.name
return result
class MinecraftServerNotRunningError(Exception):
pass
def _command_output(cmd, args=[]):
p = subprocess.Popen([cmd] + args, stdout=subprocess.PIPE)
out, _ = p.communicate()
return out.decode('utf-8')
def _download(url, local_filename=None): #FROM http://stackoverflow.com/a/16696317/667338
if local_filename is None:
local_filename = url.split('#')[0].split('?')[0].split('/')[-1]
if local_filename == '':
raise ValueError('no local filename specified')
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def _fork(func, *args, **kwargs):
#FROM http://stackoverflow.com/a/6011298/667338
# do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177)
try:
pid = os.fork()
if pid > 0:
# parent process, return and keep running
return
except OSError as e:
print('fork #1 failed: %d (%s)' % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
os.setsid()
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as e:
print('fork #2 failed: %d (%s)' % (e.errno, e.strerror), file=sys.stderr)
sys.exit(1)
with open(os.path.devnull) as devnull:
sys.stdin = devnull
sys.stdout = devnull
func(*args, **kwargs) # do stuff
os._exit(os.EX_OK) # all done
def worlds():
"""Iterates over all configured worlds."""
for world_name in CONFIG['worlds'].keys():
yield World(world_name)
if __name__ == '__main__':
    expect_user = CONFIG["runUser"]
    try:
        wurstmineberg_user = pwd.getpwnam(expect_user)
    except KeyError:
        sys.exit('[!!!!] User ‘{}’ does not exist!'.format(expect_user))
if os.geteuid() != wurstmineberg_user.pw_uid:
sys.exit('[!!!!] Only the user ‘{}’ may use this program!'.format(expect_user))
if arguments['--all'] or arguments['update-all']:
selected_worlds = worlds()
elif arguments['--enabled']:
selected_worlds = filter(lambda world: world.config['enabled'], worlds())
elif arguments['<world>']:
selected_worlds = (World(world_name) for world_name in arguments['<world>'])
else:
selected_worlds = [World()]
if arguments['kill']:
for world in selected_worlds:
if world.pidstatus():
world.kill()
else:
sys.exit('[WARN] Could not kill the "{}" world, PID file does not exist.'.format(world))
elif arguments['start']:
for world in selected_worlds:
if not world.start():
sys.exit('[FAIL] Error! Could not start the {} world.'.format(world))
else:
print('[ ok ] Minecraft is now running.')
elif arguments['stop']:
for world in selected_worlds:
if not world.stop():
sys.exit('[FAIL] Error! Could not stop the {} world.'.format(world))
else:
print('[ ok ] Minecraft is stopped.')
elif arguments['restart']:
for world in selected_worlds:
if not world.restart():
sys.exit('[FAIL] Error! Could not restart the {} world.'.format(world))
else:
print('[ ok ] Minecraft is now running.')
elif arguments['update'] or arguments['update-all']:
for world in selected_worlds:
if arguments['snapshot']:
world.update(arguments['<snapshot-id>'], snapshot=True, make_backup=not arguments['--no-backup'])
elif arguments['<version>']:
world.update(arguments['<version>'], make_backup=not arguments['--no-backup'])
else:
world.update(snapshot=True)
elif arguments['revert']:
for world in selected_worlds:
if arguments['snapshot']:
world.revert(arguments['<snapshot-id>'], snapshot=True, make_backup=not arguments['--no-backup'])
elif arguments['<version>']:
world.revert(arguments['<version>'], make_backup=not arguments['--no-backup'])
else:
world.revert()
elif arguments['backup']:
for world in selected_worlds:
world.backup()
elif arguments['status']:
exit1 = False
for world in selected_worlds:
mcversion = "" if world.version() == "" else "(Minecraft {}) ".format(world.version())
if world.status():
print('[info] The "{}" world {}is running with PID {}.'.format(world, mcversion, world.pid))
else:
exit1 = True
if world.pidstatus():
print('[info] The "{}" world is running but the socket file does not exist. Please kill the world and restart.'.format(world))
else:
print('[info] The "{}" world {}is not running.'.format(world, mcversion))
if exit1:
sys.exit(1)
elif arguments['command']:
selected_worlds = list(selected_worlds)
for world in selected_worlds:
if len(selected_worlds) > 1:
print('[info] running command on {} world'.format(world))
cmdlog = world.command(arguments['<command>'][0], arguments['<command>'][1:])
for line in cmdlog.splitlines():
print(str(line))
elif arguments['saves']:
for world in selected_worlds:
if arguments['on']:
world.save_on()
elif arguments['off']:
world.save_off()
else:
raise NotImplementedError('Subcommand not implemented')
else:
raise NotImplementedError('Subcommand not implemented')
| mit | -1,580,113,770,325,115,400 | 45.52019 | 269 | 0.584146 | false |
davidoj/RL_Aggregation | Agents.py | 1 | 8730 | '''
Reinforcement learning agents.
David Johnston 2015
'''
import numpy as np
import collections
import numbers
import random
random.seed(1)
class OnlineAgent:
"""
Generic online agent class; executes e-greedy policy, looks up values
"""
def __init__(self,problem,epsilon=1e-1,tiles=False):
self.epsilon = epsilon
self.problem = problem
self.qValues = problem.getZeroQTable()
self.reset = self.problem.reset
if tiles:
self.getQValue = self.getQTile
else:
self.getQValue = self.getQDisc
    def executePolicy(self, state, tiebreak='first'):
qs = self.getQArray(state)
test = random.random()
if test < self.epsilon:
return random.choice(range(len(qs)))
elif tiebreak == 'first':
return np.where(qs==max(qs))[0][0]
elif tiebreak == 'random':
return random.choice(np.where(qs==max(qs))[0])
def episode(self,deltaMin=1e-3,timeout=int(1e5),decayAlpha=True):
'''
Runs an episode, updates q-values and returns the length of the episode.
'''
for i in range(timeout):
currentState = self.problem.getAgentState()
action = self.executePolicy(currentState)
self.preUpdate(currentState,action)
if self.problem.isEpisodic:
terminal, nextState, reward = self.problem.result(action)
if terminal:
self.update(currentState,nextState,action,reward,decayAlpha,
terminal=1)
self.problem.reset()
return i
else:
nextState, reward = self.problem.result(action)
self.update(currentState,nextState,action,reward,decayAlpha)
return i
def run_n_episodes(self,n,decayAlpha=False,timeout=int(1e5)):
e_lengths = []
e_avgs = np.zeros(int(np.log2(n)))
j = 1
for i in range(n):
l = self.episode(timeout=timeout,decayAlpha=decayAlpha)
if l<timeout:
e_lengths.append(l)
if i == 2**j:
                s = min(1000, (len(e_lengths)+1)//2)
e_avgs[j-1]= np.average(e_lengths[-s:-1])
print(np.average(e_lengths[-s:-1]))
j += 1
else:
e_lengths.append(timeout)
self.reset()
print("Episode timed out {}".format(l))
return e_avgs
def getQDisc(self,state,action):
return self.qValues[state,action]
def getQTile(self,state,action):
return sum(self.qValues[state,action])
def getValue(self,state):
qValues = self.getQArray(state)
return max(qValues)
def getQArray(self,state):
return np.array([self.getQValue(state,a) for a in self.problem.actions])
class QAgent(OnlineAgent):
"""
Q-learning agent
"""
def __init__(self,problem,alpha=1e-1,
epsilon=1e-1):
OnlineAgent.__init__(self,problem,epsilon=epsilon)
self.alpha = problem.setAlpha(alpha)
self.counter = problem.getZeroQTable()
def update(self,state,nextState,action,reward,decayAlpha,terminal=0):
'''
Q-learning update. State is either an integer or list(array) of integers
'''
if terminal:
nextV = 0
else:
nextV = self.getValue(nextState)
currentQV = self.getQValue(state,action)
delta = reward - currentQV + self.problem.gamma*nextV
if decayAlpha:
alpha = self.alpha/(self.counter[state,action]+1)
else:
alpha = self.alpha
self.qValues[state,action] += alpha * delta
self.counter[state,action] += 1
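    # For reference, the update above is the standard Q-learning backup, with alpha optionally
    # decayed by the per-(s,a) visit count:
    #
    #     Q(s,a) <- Q(s,a) + alpha * [ r + gamma * max_a' Q(s',a') - Q(s,a) ]
    #
    # where max_a' Q(s',a') is getValue(nextState) and is replaced by 0 on terminal transitions.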
def preUpdate(self,state,action):
return
class SarsaLambda(OnlineAgent):
"""
SARSA with eligibility traces
"""
def __init__(self,problem,alpha,lamda=0.5,policy='e-greedy',
epsilon=1e-1,debug=False):
OnlineAgent.__init__(self,problem,epsilon=epsilon)
self.alpha = problem.setAlpha(alpha)
self.e = problem.getZeroQTable()
self.counter = problem.getZeroQTable()
self.lamda = lamda
def reset(self):
        self.problem.reset()
        self.e = self.problem.getZeroQTable()
def preUpdate(self,state,action):
self.e *= self.problem.gamma*self.lamda
for a in self.problem.actions:
if a == action:
self.e[state,a] = 1
else:
self.e[state,a] = 0
def update(self,state,nextState,action,reward,decayAlpha,terminal=0):
'''
Sarsa(Lambda) update
'''
        nextAction = self.executePolicy(nextState)
if terminal:
nextV=0
else:
nextV = self.getQValue(nextState,nextAction)
delta = reward - self.getQValue(state,action)
delta += self.problem.gamma*nextV
if decayAlpha:
            alpha = self.alpha*((self.counter[state,action]+1)**(-1))
else:
alpha = self.alpha
self.counter[state,action] += 1
self.qValues += delta*alpha*self.e
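    # For reference, the update above is Sarsa(lambda) with replacing traces: preUpdate decays the
    # whole trace table by gamma*lambda and sets e(s, a_taken) = 1 (other actions in s are cleared),
    # and then
    #
    #     delta = r + gamma * Q(s', a') - Q(s, a)
    #     Q <- Q + alpha * delta * e        (applied to the full Q-table at once)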
class VIAgent():
"""
Offline value iteration agent
"""
def __init__(self,problem, policy="e-greedy",epsilon=1e-1,timeout=int(1e6)):
'''
Must be initialised with a problem with known transition and reward matrices
'''
self.problem = problem
self.epsilon = epsilon
self.qValues = problem.getZeroQTable()
self.transitionMatrix = problem.transitions
self.rewardMatrix = problem.rewards
self.timeout = timeout
#if policy == "e-greedy":
self.policyMatrix = np.zeros(self.qValues.shape) + 1/self.qValues.shape[0]
def executePolicy(self, state, epsilon=1e-1,tiebreak='random'):
qs = self.getQArray(state)
test = random.random()
if test < epsilon:
return random.choice(range(len(qs)))
elif tiebreak == 'first':
return np.where(qs==max(qs))[0][0]
elif tiebreak == 'random':
return random.choice(np.where(qs==max(qs))[0])
def getQValue(self,state,action):
'''
        Get Q(s,a). S may be either an integer or a list of ints if
function approximation is used.
'''
if isinstance(state,collections.Container):
state=np.array(state)
return sum(self.qValues[state,action])
return self.qValues[state,action]
def getValue(self,state):
qValues = self.getQArray(state)
return max(qValues)
def getQArray(self,state):
return np.array([self.getQValue(state,a) for a in self.problem.actions])
def greedifyPolicy(self,epsilon=1e-1):
old_policy = self.policyMatrix
self.policyMatrix = np.full_like(self.policyMatrix,epsilon/self.qValues.shape[0])
for state, policy in enumerate(self.policyMatrix):
policy_choice = self.executePolicy(state,epsilon=0)
policy[policy_choice] += 1-epsilon
if (self.policyMatrix == old_policy).all():
return 1
else:
return 0
def VISweep(self):
while True:
self.evalPolicy()
if self.greedifyPolicy():
break
def evalPolicy(self, deltaMin=1e-5):
delta = float('inf')
counter = 0
while delta>deltaMin and counter<self.timeout:
delta = 0
for state, aValues in enumerate(self.qValues):
for action, action_value in enumerate(aValues):
temp = action_value
states = range(len(self.qValues))
new_values = [self.transitionMatrix[action,state,nstate]*
(self.rewardMatrix[action,state,nstate]+
self.problem.gamma*self.getValue(nstate))
for nstate in states ]
new_action_value = sum(new_values)
self.qValues[state,action] = new_action_value
delta = max(delta, abs(temp-new_action_value))
counter += 1
if counter >= self.timeout-1:
print("Value iteration did not converge, delta = {}".format(delta))
| mit | -4,746,742,530,396,633,000 | 28.493243 | 89 | 0.548454 | false |
SNeuhausen/training_management | models/resource_analysis/trainer_workload_analyzer.py | 1 | 4295 | # -*- coding: utf-8 -*-
from openerp import api, models
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.utils.date_utils import DateUtils
class TrainerWorkloadAnalyzer(models.AbstractModel):
_name = ModelNames.TRAINER_WORKLOAD_ANALYZER
@api.model
def compute_trainer_workload_data(self, start_date, end_date):
start_date, end_date = DateUtils.convert_to_dates(start_date, end_date)
first_week = DateUtils.get_monday_of_week(start_date)
last_week = DateUtils.get_friday_of_week(end_date)
trainer_workload_data = {
"weeks_to_display": [],
"trainer_info": {},
"workloads": {},
"workload_totals": {},
}
current_week = first_week
while current_week <= last_week:
year_week = DateUtils.build_year_week_string_from_date(current_week)
trainer_workload_data["weeks_to_display"].append(year_week)
current_week += DateUtils.ONE_WEEK_TIME_DELTA
partner_model = self.env[ModelNames.PARTNER]
trainers = partner_model.search([("is_trainer", "=", True)])
for trainer in trainers:
trainer_id = str(trainer.id)
trainer_workload_data["workloads"][trainer_id] = {}
self._add_trainer_info(trainer_workload_data, trainer)
resources = self._find_resources_in_range_having_trainer(first_week, last_week, trainers)
self._update_trainer_workload_data_from_resources(resources, trainer_workload_data)
workloads = trainer_workload_data["workloads"]
for trainer_id, trainer_workload in workloads.iteritems():
lesson_total = sum(trainer_workload.values())
trainer_workload_data["workload_totals"][trainer_id] = lesson_total;
return trainer_workload_data
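    # Illustrative shape of the returned structure (ids and values are hypothetical; the exact
    # year-week key format comes from DateUtils.build_year_week_string_from_date):
    #
    #     {"weeks_to_display": ["2016-01", "2016-02"],
    #      "trainer_info": {"7": {"color_name": "blue", "name": "Doe, Jane"}},
    #      "workloads": {"7": {"2016-01": 12, "2016-02": 8}},
    #      "workload_totals": {"7": 20}}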
@staticmethod
def _add_trainer_info(trainer_workload_data, trainer):
trainer_info = trainer_workload_data["trainer_info"]
trainer_id = str(trainer.id)
if trainer_id not in trainer_info:
trainer_info[trainer_id] = {}
trainer_info[trainer_id].update({
"color_name": trainer.color_name,
"name": u"{surname}, {forename}".format(surname=trainer.surname, forename=trainer.forename),
})
def _update_trainer_workload_data_from_resources(self, resources, trainer_workload_data):
for resource in resources:
if not resource.trainer_id:
continue
trainer_id = str(resource.trainer_id.id)
year_week = resource.year_week_string
workloads = trainer_workload_data["workloads"]
if trainer_id not in workloads:
workloads[trainer_id] = {}
self._add_trainer_info(trainer_workload_data, resource.trainer_id)
trainer_workload = workloads[trainer_id]
if year_week not in trainer_workload:
trainer_workload[year_week] = 0
trainer_workload[year_week] += resource.get_lesson_count()
def _find_resources_in_range_having_trainer(self, start_date, end_date, trainers):
resource_model = self.env[ModelNames.RESOURCE]
domain = [
("date", ">=", DateUtils.convert_to_string(start_date)),
("date", "<=", DateUtils.convert_to_string(end_date)),
("trainer_id", "in", trainers.ids),
]
return resource_model.search(domain)
@api.model
@api.returns("self")
def find_trainers_with_main_location(self, main_location_id):
trainer_model = self.env[ModelNames.TRAINER]
domain = [
("is_trainer", "=", True),
("main_location_id", "=", main_location_id)
]
trainers = trainer_model.search(domain)
return trainers
def _find_trainers_for_user_locations(self):
location_model = self.env[ModelNames.LOCATION]
trainer_model = self.env[ModelNames.TRAINER]
user_locations = location_model.search([("user_ids", "in", [self.env.user.id])])
domain = [
("is_trainer", "=", True),
("main_location_id", "in", user_locations.ids)
]
trainers = trainer_model.search(domain)
return trainers
| gpl-3.0 | -8,989,901,224,261,650,000 | 41.524752 | 104 | 0.618859 | false |
petrblahos/pyramid_locmako | pyramid_locmako/scaffolds/__init__.py | 1 | 1383 | import subprocess
import sys
from pyramid.scaffolds import PyramidTemplate
class LocmakoTemplate(PyramidTemplate):
_template_dir = 'pyramid_locmako'
summary = 'pyramid project with Mako and Localization'
def post(self, command, output_dir, vars):
print "=== POST", command, output_dir, vars
subprocess.call([ sys.executable, "setup.py", "extract_messages" ], cwd=output_dir)
while 1:
lc = raw_input("Language to initialize: (enter to skip)")
if not lc:
break
if 2!=len(lc) or not lc.isalpha():
print "sorry, need 2 letters, nothing more"
continue
subprocess.call([ sys.executable, "setup.py", "init_catalog", "-l", lc ], cwd=output_dir)
subprocess.call([ sys.executable, "setup.py", "update_catalog" ], cwd=output_dir)
subprocess.call([ sys.executable, "setup.py", "compile_catalog" ], cwd=output_dir)
return super(self.__class__, self).post(command, output_dir, vars)
def pre(self, command, output_dir, vars):
return super(self.__class__, self).pre(command, output_dir, vars)
def template_dir(self):
return super(self.__class__, self).template_dir()
def render_template(self, content, vars, filename=None):
return super(self.__class__, self).render_template(content, vars, filename)
| mit | -4,874,192,753,432,911,000 | 40.909091 | 101 | 0.62979 | false |
turbokongen/home-assistant | homeassistant/components/bond/config_flow.py | 1 | 4278 | """Config flow for Bond integration."""
import logging
from typing import Any, Dict, Optional
from aiohttp import ClientConnectionError, ClientResponseError
from bond_api import Bond
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
HTTP_UNAUTHORIZED,
)
from .const import CONF_BOND_ID
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA_USER = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_ACCESS_TOKEN): str}
)
DATA_SCHEMA_DISCOVERY = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
async def _validate_input(data: Dict[str, Any]) -> str:
"""Validate the user input allows us to connect."""
try:
bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN])
version = await bond.version()
# call to non-version API is needed to validate authentication
await bond.devices()
except ClientConnectionError as error:
raise InputValidationError("cannot_connect") from error
except ClientResponseError as error:
if error.status == HTTP_UNAUTHORIZED:
raise InputValidationError("invalid_auth") from error
raise InputValidationError("unknown") from error
except Exception as error:
_LOGGER.exception("Unexpected exception")
raise InputValidationError("unknown") from error
# Return unique ID from the hub to be stored in the config entry.
bond_id = version.get("bondid")
if not bond_id:
raise InputValidationError("old_firmware")
return bond_id
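# Example (hypothetical host and token): the dict passed to _validate_input mirrors the form data
# collected by the flow, e.g.
#
#     bond_id = await _validate_input({CONF_HOST: "192.168.1.10", CONF_ACCESS_TOKEN: "abc123"})
#
# which returns the hub's "bondid" or raises InputValidationError with a translation key.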
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Bond."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
_discovered: dict = None
async def async_step_zeroconf(
self, discovery_info: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by zeroconf discovery."""
name: str = discovery_info[CONF_NAME]
host: str = discovery_info[CONF_HOST]
bond_id = name.partition(".")[0]
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured({CONF_HOST: host})
self._discovered = {
CONF_HOST: host,
CONF_BOND_ID: bond_id,
}
self.context.update({"title_placeholders": self._discovered})
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle confirmation flow for discovered bond hub."""
errors = {}
if user_input is not None:
data = user_input.copy()
data[CONF_HOST] = self._discovered[CONF_HOST]
try:
return await self._try_create_entry(data)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="confirm",
data_schema=DATA_SCHEMA_DISCOVERY,
errors=errors,
description_placeholders=self._discovered,
)
async def async_step_user(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
return await self._try_create_entry(user_input)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
async def _try_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]:
bond_id = await _validate_input(data)
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=bond_id, data=data)
class InputValidationError(exceptions.HomeAssistantError):
"""Error to indicate we cannot proceed due to invalid input."""
def __init__(self, base: str):
"""Initialize with error base."""
super().__init__()
self.base = base
| apache-2.0 | 4,549,398,951,194,947,600 | 32.421875 | 78 | 0.63511 | false |
bicephale/OctoPrint | src/octoprint/settings.py | 1 | 44812 | # coding=utf-8
"""
This module represents OctoPrint's settings management. Within this module the default settings for the core
application are defined and the instance of the :class:`Settings` is held, which offers getter and setter
methods for the raw configuration values as well as various convenience methods to access the paths to base folders
of various types and the configuration file itself.
.. autodata:: default_settings
:annotation: = dict(...)
.. autodata:: valid_boolean_trues
.. autofunction:: settings
.. autoclass:: Settings
:members:
:undoc-members:
"""
from __future__ import absolute_import, division, print_function
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import sys
import os
import yaml
import yaml.parser
import logging
import re
import uuid
import copy
import time
from builtins import bytes
try:
from collections import ChainMap
except ImportError:
from chainmap import ChainMap
from octoprint.util import atomic_write, is_hidden_path, dict_merge
_APPNAME = "OctoPrint"
_instance = None
def settings(init=False, basedir=None, configfile=None):
"""
Factory method for initially constructing and consecutively retrieving the :class:`~octoprint.settings.Settings`
	singleton.
Arguments:
init (boolean): A flag indicating whether this is the initial call to construct the singleton (True) or not
(False, default). If this is set to True and the plugin manager has already been initialized, a :class:`ValueError`
will be raised. The same will happen if the plugin manager has not yet been initialized and this is set to
False.
basedir (str): Path of the base directory for all of OctoPrint's settings, log files, uploads etc. If not set
the default will be used: ``~/.octoprint`` on Linux, ``%APPDATA%/OctoPrint`` on Windows and
``~/Library/Application Support/OctoPrint`` on MacOS.
configfile (str): Path of the configuration file (``config.yaml``) to work on. If not set the default will
be used: ``<basedir>/config.yaml`` for ``basedir`` as defined above.
Returns:
Settings: The fully initialized :class:`Settings` instance.
Raises:
ValueError: ``init`` is True but settings are already initialized or vice versa.
"""
global _instance
if _instance is not None:
if init:
raise ValueError("Settings Manager already initialized")
else:
if init:
_instance = Settings(configfile=configfile, basedir=basedir)
else:
raise ValueError("Settings not initialized yet")
return _instance
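# Example (hypothetical base directory): typical usage is one initializing call during startup
# followed by plain retrievals everywhere else:
#
#     s = settings(init=True, basedir="/home/pi/.octoprint")
#     # ... later, anywhere in the code base ...
#     port = settings().get(["server", "port"])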
default_settings = {
"serial": {
"port": None,
"baudrate": None,
"autoconnect": False,
"log": False,
"timeout": {
"detection": 0.5,
"connection": 10,
"communication": 30,
"temperature": 5,
"temperatureTargetSet": 2,
"sdStatus": 1
},
"maxCommunicationTimeouts": {
"idle": 2,
"printing": 5,
"long": 5
},
"maxWritePasses": 5,
"additionalPorts": [],
"additionalBaudrates": [],
"longRunningCommands": ["G4", "G28", "G29", "G30", "G32", "M400", "M226", "M600"],
"checksumRequiringCommands": ["M110"],
"helloCommand": "M110 N0",
"disconnectOnErrors": True,
"ignoreErrorsFromFirmware": False,
"logResends": True,
"supportResendsWithoutOk": False,
# command specific flags
"triggerOkForM29": True
},
"server": {
"host": "0.0.0.0",
"port": 5000,
"firstRun": True,
"startOnceInSafeMode": False,
"seenWizards": {},
"secretKey": None,
"reverseProxy": {
"prefixHeader": None,
"schemeHeader": None,
"hostHeader": None,
"serverHeader": None,
"portHeader": None,
"prefixFallback": None,
"schemeFallback": None,
"hostFallback": None,
"serverFallback": None,
"portFallback": None
},
"uploads": {
"maxSize": 1 * 1024 * 1024 * 1024, # 1GB
"nameSuffix": "name",
"pathSuffix": "path"
},
"maxSize": 100 * 1024, # 100 KB
"commands": {
"systemShutdownCommand": None,
"systemRestartCommand": None,
"serverRestartCommand": None
},
"diskspace": {
"warning": 500 * 1024 * 1024, # 500 MB
"critical": 200 * 1024 * 1024, # 200 MB
},
"preemptiveCache": {
"exceptions": [],
"until": 7
}
},
"webcam": {
"stream": None,
"snapshot": None,
"ffmpeg": None,
"ffmpegThreads": 1,
"bitrate": "5000k",
"watermark": True,
"flipH": False,
"flipV": False,
"rotate90" : False,
"timelapse": {
"type": "off",
"options": {},
"postRoll": 0,
"fps": 25,
},
"cleanTmpAfterDays": 7
},
"gcodeViewer": {
"enabled": True,
"mobileSizeThreshold": 2 * 1024 * 1024, # 2MB
"sizeThreshold": 20 * 1024 * 1024, # 20MB
},
"gcodeAnalysis": {
"maxExtruders": 10,
"throttle_normalprio": 0.01,
"throttle_highprio": 0.0,
"throttle_lines": 100
},
"feature": {
"temperatureGraph": True,
"waitForStartOnConnect": False,
"alwaysSendChecksum": False,
"neverSendChecksum": False,
"sendChecksumWithUnknownCommands": False,
"unknownCommandsNeedAck": False,
"sdSupport": True,
"sdRelativePath": False,
"sdAlwaysAvailable": False,
"swallowOkAfterResend": True,
"repetierTargetTemp": False,
"externalHeatupDetection": True,
"supportWait": True,
"keyboardControl": True,
"pollWatched": False,
"ignoreIdenticalResends": False,
"identicalResendsCountdown": 7,
"supportFAsCommand": False,
"modelSizeDetection": True,
"firmwareDetection": True,
"printCancelConfirmation": True,
"blockWhileDwelling": False
},
"folder": {
"uploads": None,
"timelapse": None,
"timelapse_tmp": None,
"logs": None,
"virtualSd": None,
"watched": None,
"plugins": None,
"slicingProfiles": None,
"printerProfiles": None,
"scripts": None,
"translations": None,
"generated": None,
"data": None
},
"temperature": {
"profiles": [
{"name": "ABS", "extruder" : 210, "bed" : 100 },
{"name": "PLA", "extruder" : 180, "bed" : 60 }
],
"cutoff": 30
},
"printerProfiles": {
"default": None,
"defaultProfile": {}
},
"printerParameters": {
"pauseTriggers": [],
"defaultExtrusionLength": 5
},
"appearance": {
"name": "",
"color": "default",
"colorTransparent": False,
"defaultLanguage": "_default",
"showFahrenheitAlso": False,
"components": {
"order": {
"navbar": ["settings", "systemmenu", "plugin_announcements", "login"],
"sidebar": ["connection", "state", "files"],
"tab": ["temperature", "control", "gcodeviewer", "terminal", "timelapse"],
"settings": [
"section_printer", "serial", "printerprofiles", "temperatures", "terminalfilters", "gcodescripts",
"section_features", "features", "webcam", "accesscontrol", "gcodevisualizer", "api",
"section_octoprint", "server", "folders", "appearance", "logs", "plugin_pluginmanager", "plugin_softwareupdate", "plugin_announcements"
],
"usersettings": ["access", "interface"],
"wizard": ["access"],
"about": ["about", "supporters", "authors", "changelog", "license", "thirdparty", "plugin_pluginmanager"],
"generic": []
},
"disabled": {
"navbar": [],
"sidebar": [],
"tab": [],
"settings": [],
"usersettings": [],
"generic": []
}
}
},
"controls": [],
"system": {
"actions": []
},
"accessControl": {
"enabled": True,
"salt": None,
"userManager": "octoprint.users.FilebasedUserManager",
"userfile": None,
"autologinLocal": False,
"localNetworks": ["127.0.0.0/8"],
"autologinAs": None
},
"slicing": {
"enabled": True,
"defaultSlicer": "cura",
"defaultProfiles": None
},
"events": {
"enabled": True,
"subscriptions": []
},
"api": {
"enabled": True,
"key": None,
"allowCrossOrigin": False,
"apps": {}
},
"terminalFilters": [
{ "name": "Suppress temperature messages", "regex": "(Send: (N\d+\s+)?M105)|(Recv: ok (B|T\d*):)" },
{ "name": "Suppress SD status messages", "regex": "(Send: (N\d+\s+)?M27)|(Recv: SD printing byte)" },
{ "name": "Suppress wait responses", "regex": "Recv: wait"}
],
"plugins": {
"_disabled": []
},
"scripts": {
"gcode": {
"afterPrintCancelled": "; disable motors\nM84\n\n;disable all heaters\n{% snippet 'disable_hotends' %}\n{% snippet 'disable_bed' %}\n;disable fan\nM106 S0",
"snippets": {
"disable_hotends": "{% for tool in range(printer_profile.extruder.count) %}M104 T{{ tool }} S0\n{% endfor %}",
"disable_bed": "{% if printer_profile.heatedBed %}M140 S0\n{% endif %}"
}
}
},
"estimation": {
"printTime": {
"statsWeighingUntil": 0.5,
"validityRange": 0.15,
"forceDumbFromPercent": 0.3,
"forceDumbAfterMin": 30,
"stableThreshold": 60
}
},
"devel": {
"stylesheet": "css",
"cache": {
"enabled": True,
"preemptive": True
},
"webassets": {
"minify": False,
"bundle": True,
"clean_on_startup": True
},
"virtualPrinter": {
"enabled": False,
"okAfterResend": False,
"forceChecksum": False,
"numExtruders": 1,
"includeCurrentToolInTemps": True,
"includeFilenameInOpened": True,
"hasBed": True,
"repetierStyleTargetTemperature": False,
"repetierStyleResends": False,
"okBeforeCommandOutput": False,
"smoothieTemperatureReporting": False,
"extendedSdFileList": False,
"throttle": 0.01,
"waitOnLongMoves": False,
"rxBuffer": 64,
"txBuffer": 40,
"commandBuffer": 4,
"sendWait": True,
"waitInterval": 1.0,
"supportM112": True,
"echoOnM117": True,
"brokenM29": True,
"supportF": False,
"firmwareName": "Virtual Marlin 1.0",
"sharedNozzle": False,
"sendBusy": False,
"simulateReset": True,
"preparedOks": [],
"okFormatString": "ok"
}
}
}
"""The default settings of the core application."""
valid_boolean_trues = [True, "true", "yes", "y", "1"]
""" Values that are considered to be equivalent to the boolean ``True`` value, used for type conversion in various places."""
class NoSuchSettingsPath(BaseException):
pass
class InvalidSettings(BaseException):
def __init__(self, message, line=None, column=None, details=None):
self.message = message
self.line = line
self.column = column
self.details = details
class HierarchicalChainMap(ChainMap):
def deep_dict(self, root=None):
if root is None:
root = self
result = dict()
for key, value in root.items():
if isinstance(value, dict):
result[key] = self.deep_dict(root=self.__class__._get_next(key, root))
else:
result[key] = value
return result
def has_path(self, path, only_local=False, only_defaults=False):
if only_defaults:
current = self.parents
elif only_local:
current = self.__class__(self.maps[0])
else:
current = self
try:
for key in path[:-1]:
value = current[key]
if isinstance(value, dict):
current = self.__class__._get_next(key, current, only_local=only_local)
else:
return False
return path[-1] in current
except KeyError:
return False
def get_by_path(self, path, only_local=False, only_defaults=False):
if only_defaults:
current = self.parents
elif only_local:
current = self.__class__(self.maps[0])
else:
current = self
for key in path[:-1]:
value = current[key]
if isinstance(value, dict):
current = self.__class__._get_next(key, current, only_local=only_local)
else:
raise KeyError(key)
return current[path[-1]]
def set_by_path(self, path, value):
current = self
for key in path[:-1]:
if key not in current.maps[0]:
current.maps[0][key] = dict()
if not isinstance(current[key], dict):
raise KeyError(key)
current = self.__class__._hierarchy_for_key(key, current)
current[path[-1]] = value
def del_by_path(self, path):
if not path:
raise ValueError("Invalid path")
current = self
for key in path[:-1]:
if not isinstance(current[key], dict):
raise KeyError(key)
current = self.__class__._hierarchy_for_key(key, current)
del current[path[-1]]
@classmethod
def _hierarchy_for_key(cls, key, chain):
wrapped_mappings = list()
for mapping in chain.maps:
if key in mapping and mapping[key] is not None:
wrapped_mappings.append(mapping[key])
else:
wrapped_mappings.append(dict())
return HierarchicalChainMap(*wrapped_mappings)
@classmethod
def _get_next(cls, key, node, only_local=False):
if isinstance(node, dict):
return node[key]
elif only_local and not key in node.maps[0]:
raise KeyError(key)
else:
return cls._hierarchy_for_key(key, node)
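# Example (hypothetical values): HierarchicalChainMap layers the mutable local config over the
# defaults and addresses nested keys via paths, e.g.
#
#     m = HierarchicalChainMap(dict(), dict(serial=dict(port=None, baudrate=250000)))
#     m.get_by_path(["serial", "baudrate"])                    # -> 250000 (from defaults)
#     m.set_by_path(["serial", "port"], "/dev/ttyACM0")        # written to the local layer
#     m.get_by_path(["serial", "port"], only_defaults=True)    # -> None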
class Settings(object):
"""
The :class:`Settings` class allows managing all of OctoPrint's settings. It takes care of initializing the settings
directory, loading the configuration from ``config.yaml``, persisting changes to disk etc and provides access
methods for getting and setting specific values from the overall settings structure via paths.
A general word on the concept of paths, since they play an important role in OctoPrint's settings management. A
path is basically a list or tuple consisting of keys to follow down into the settings (which are basically like
a ``dict``) in order to set or retrieve a specific value (or more than one). For example, for a settings
structure like the following::
serial:
port: "/dev/ttyACM0"
baudrate: 250000
timeouts:
communication: 20.0
temperature: 5.0
sdStatus: 1.0
connection: 10.0
server:
host: "0.0.0.0"
port: 5000
the following paths could be used:
========================================== ============================================================================
Path Value
========================================== ============================================================================
``["serial", "port"]`` ::
"/dev/ttyACM0"
``["serial", "timeout"]`` ::
communication: 20.0
temperature: 5.0
sdStatus: 1.0
connection: 10.0
``["serial", "timeout", "temperature"]`` ::
5.0
``["server", "port"]`` ::
5000
========================================== ============================================================================
However, these would be invalid paths: ``["key"]``, ``["serial", "port", "value"]``, ``["server", "host", 3]``.
"""
def __init__(self, configfile=None, basedir=None):
self._logger = logging.getLogger(__name__)
self._basedir = None
self._map = HierarchicalChainMap(dict(), default_settings)
self._config = None
self._dirty = False
self._dirty_time = 0
self._mtime = None
self._get_preprocessors = dict(
controls=self._process_custom_controls
)
self._set_preprocessors = dict()
self._init_basedir(basedir)
if configfile is not None:
self._configfile = configfile
else:
self._configfile = os.path.join(self._basedir, "config.yaml")
self.load(migrate=True)
apikey = self.get(["api", "key"])
if not apikey or apikey == "n/a":
self.set(["api", "key"], ''.join('%02X' % z for z in bytes(uuid.uuid4().bytes)))
self.save(force=True)
self._script_env = self._init_script_templating()
def _init_basedir(self, basedir):
if basedir is not None:
self._basedir = basedir
else:
self._basedir = _default_basedir(_APPNAME)
if not os.path.isdir(self._basedir):
os.makedirs(self._basedir)
def _get_default_folder(self, type):
folder = default_settings["folder"][type]
if folder is None:
folder = os.path.join(self._basedir, type.replace("_", os.path.sep))
return folder
def _init_script_templating(self):
from jinja2 import Environment, BaseLoader, ChoiceLoader, TemplateNotFound
from jinja2.nodes import Include
from jinja2.ext import Extension
from octoprint.util.jinja import FilteredFileSystemLoader
class SnippetExtension(Extension):
tags = {"snippet"}
fields = Include.fields
def parse(self, parser):
node = parser.parse_include()
if not node.template.value.startswith("/"):
node.template.value = "snippets/" + node.template.value
return node
class SettingsScriptLoader(BaseLoader):
def __init__(self, s):
self._settings = s
def get_source(self, environment, template):
parts = template.split("/")
if not len(parts):
raise TemplateNotFound(template)
script = self._settings.get(["scripts"], merged=True)
for part in parts:
if isinstance(script, dict) and part in script:
script = script[part]
else:
raise TemplateNotFound(template)
source = script
if source is None:
raise TemplateNotFound(template)
mtime = self._settings._mtime
return source, None, lambda: mtime == self._settings.last_modified
def list_templates(self):
scripts = self._settings.get(["scripts"], merged=True)
return self._get_templates(scripts)
def _get_templates(self, scripts):
templates = []
for key in scripts:
if isinstance(scripts[key], dict):
templates += map(lambda x: key + "/" + x, self._get_templates(scripts[key]))
elif isinstance(scripts[key], basestring):
templates.append(key)
return templates
class SelectLoader(BaseLoader):
def __init__(self, default, mapping, sep=":"):
self._default = default
self._mapping = mapping
self._sep = sep
def get_source(self, environment, template):
if self._sep in template:
prefix, name = template.split(self._sep, 1)
if not prefix in self._mapping:
raise TemplateNotFound(template)
return self._mapping[prefix].get_source(environment, name)
return self._default.get_source(environment, template)
def list_templates(self):
return self._default.list_templates()
class RelEnvironment(Environment):
def __init__(self, prefix_sep=":", *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self._prefix_sep = prefix_sep
def join_path(self, template, parent):
prefix, name = self._split_prefix(template)
if name.startswith("/"):
return self._join_prefix(prefix, name[1:])
else:
_, parent_name = self._split_prefix(parent)
parent_base = parent_name.split("/")[:-1]
return self._join_prefix(prefix, "/".join(parent_base) + "/" + name)
def _split_prefix(self, template):
if self._prefix_sep in template:
return template.split(self._prefix_sep, 1)
else:
return "", template
def _join_prefix(self, prefix, template):
if len(prefix):
return prefix + self._prefix_sep + template
else:
return template
path_filter = lambda path: not is_hidden_path(path)
file_system_loader = FilteredFileSystemLoader(self.getBaseFolder("scripts"),
path_filter=path_filter)
settings_loader = SettingsScriptLoader(self)
choice_loader = ChoiceLoader([file_system_loader, settings_loader])
select_loader = SelectLoader(choice_loader,
dict(bundled=settings_loader,
file=file_system_loader))
return RelEnvironment(loader=select_loader, extensions=[SnippetExtension])
def _get_script_template(self, script_type, name, source=False):
from jinja2 import TemplateNotFound
template_name = script_type + "/" + name
try:
if source:
template_name, _, _ = self._script_env.loader.get_source(self._script_env, template_name)
return template_name
else:
return self._script_env.get_template(template_name)
except TemplateNotFound:
return None
except:
self._logger.exception("Exception while trying to resolve template {template_name}".format(**locals()))
return None
def _get_scripts(self, script_type):
return self._script_env.list_templates(filter_func=lambda x: x.startswith(script_type+"/"))
def _process_custom_controls(self, controls):
def process_control(c):
# shallow copy
result = dict(c)
if "regex" in result and "template" in result:
# if it's a template matcher, we need to add a key to associate with the matcher output
import hashlib
key_hash = hashlib.md5()
key_hash.update(result["regex"])
result["key"] = key_hash.hexdigest()
template_key_hash = hashlib.md5()
template_key_hash.update(result["template"])
result["template_key"] = template_key_hash.hexdigest()
elif "children" in result:
# if it has children we need to process them recursively
result["children"] = map(process_control, [child for child in result["children"] if child is not None])
return result
return map(process_control, controls)
@property
def effective(self):
return self._map.deep_dict()
@property
def effective_yaml(self):
import yaml
return yaml.safe_dump(self.effective)
@property
def effective_hash(self):
import hashlib
hash = hashlib.md5()
hash.update(self.effective_yaml)
return hash.hexdigest()
@property
def config_yaml(self):
import yaml
return yaml.safe_dump(self._config)
@property
def config_hash(self):
import hashlib
hash = hashlib.md5()
hash.update(self.config_yaml)
return hash.hexdigest()
@property
def _config(self):
return self._map.maps[0]
@_config.setter
def _config(self, value):
self._map.maps[0] = value
@property
def _overlay_maps(self):
if len(self._map.maps) > 2:
return self._map.maps[1:-1]
else:
return []
@property
def _default_map(self):
return self._map.maps[-1]
@property
def last_modified(self):
"""
Returns:
int: The last modification time of the configuration file.
"""
stat = os.stat(self._configfile)
return stat.st_mtime
@property
def last_modified_or_made_dirty(self):
return max(self.last_modified, self._dirty_time)
#~~ load and save
def load(self, migrate=False):
if os.path.exists(self._configfile) and os.path.isfile(self._configfile):
with open(self._configfile, "r") as f:
try:
self._config = yaml.safe_load(f)
self._mtime = self.last_modified
except yaml.YAMLError as e:
details = e.message
if hasattr(e, "problem_mark"):
line = e.problem_mark.line
column = e.problem_mark.column
else:
line = None
column = None
raise InvalidSettings("Invalid YAML file: {}".format(self._configfile),
details=details,
line=line,
column=column)
except:
raise
# changed from else to handle cases where the file exists, but is empty / 0 bytes
if not self._config:
self._config = dict()
if migrate:
self._migrate_config()
def load_overlay(self, overlay, migrate=True):
config = None
if callable(overlay):
try:
overlay = overlay(self)
except:
self._logger.exception("Error loading overlay from callable")
return
if isinstance(overlay, basestring):
if os.path.exists(overlay) and os.path.isfile(overlay):
with open(overlay, "r") as f:
config = yaml.safe_load(f)
elif isinstance(overlay, dict):
config = overlay
else:
raise ValueError("Overlay must be either a path to a yaml file or a dictionary")
if not isinstance(config, dict):
raise ValueError("Configuration data must be a dict but is a {}".format(config.__class__))
if migrate:
self._migrate_config(config)
return config
def add_overlay(self, overlay, at_end=False):
if at_end:
pos = len(self._map.maps) - 1
self._map.maps.insert(pos, overlay)
else:
self._map.maps.insert(1, overlay)
def _migrate_config(self, config=None, persist=False):
if config is None:
config = self._config
persist = True
dirty = False
migrators = (
self._migrate_event_config,
self._migrate_reverse_proxy_config,
self._migrate_printer_parameters,
self._migrate_gcode_scripts,
self._migrate_core_system_commands
)
for migrate in migrators:
dirty = migrate(config) or dirty
if dirty and persist:
self.save(force=True)
def _migrate_gcode_scripts(self, config):
"""
Migrates an old development version of gcode scripts to the new template based format.
"""
dirty = False
if "scripts" in config:
if "gcode" in config["scripts"]:
if "templates" in config["scripts"]["gcode"]:
del config["scripts"]["gcode"]["templates"]
replacements = dict(
disable_steppers="M84",
disable_hotends="{% snippet 'disable_hotends' %}",
disable_bed="M140 S0",
disable_fan="M106 S0"
)
for name, script in config["scripts"]["gcode"].items():
self.saveScript("gcode", name, script.format(**replacements))
del config["scripts"]
dirty = True
return dirty
def _migrate_printer_parameters(self, config):
"""
Migrates the old "printer > parameters" data structure to the new printer profile mechanism.
"""
default_profile = config["printerProfiles"]["defaultProfile"] if "printerProfiles" in config and "defaultProfile" in config["printerProfiles"] else dict()
dirty = False
if "printerParameters" in config:
printer_parameters = config["printerParameters"]
if "movementSpeed" in printer_parameters or "invertAxes" in printer_parameters:
dirty = True
default_profile["axes"] = dict(x=dict(), y=dict(), z=dict(), e=dict())
if "movementSpeed" in printer_parameters:
for axis in ("x", "y", "z", "e"):
if axis in printer_parameters["movementSpeed"]:
default_profile["axes"][axis]["speed"] = printer_parameters["movementSpeed"][axis]
del config["printerParameters"]["movementSpeed"]
if "invertedAxes" in printer_parameters:
for axis in ("x", "y", "z", "e"):
if axis in printer_parameters["invertedAxes"]:
default_profile["axes"][axis]["inverted"] = True
del config["printerParameters"]["invertedAxes"]
if "numExtruders" in printer_parameters or "extruderOffsets" in printer_parameters:
dirty = True
if not "extruder" in default_profile:
default_profile["extruder"] = dict()
if "numExtruders" in printer_parameters:
default_profile["extruder"]["count"] = printer_parameters["numExtruders"]
del config["printerParameters"]["numExtruders"]
if "extruderOffsets" in printer_parameters:
extruder_offsets = []
for offset in printer_parameters["extruderOffsets"]:
if "x" in offset and "y" in offset:
extruder_offsets.append((offset["x"], offset["y"]))
default_profile["extruder"]["offsets"] = extruder_offsets
del config["printerParameters"]["extruderOffsets"]
if "bedDimensions" in printer_parameters:
dirty = True
bed_dimensions = printer_parameters["bedDimensions"]
if not "volume" in default_profile:
default_profile["volume"] = dict()
if "circular" in bed_dimensions and "r" in bed_dimensions and bed_dimensions["circular"]:
default_profile["volume"]["formFactor"] = "circular"
default_profile["volume"]["width"] = 2 * bed_dimensions["r"]
default_profile["volume"]["depth"] = default_profile["volume"]["width"]
elif "x" in bed_dimensions or "y" in bed_dimensions:
default_profile["volume"]["formFactor"] = "rectangular"
if "x" in bed_dimensions:
default_profile["volume"]["width"] = bed_dimensions["x"]
if "y" in bed_dimensions:
default_profile["volume"]["depth"] = bed_dimensions["y"]
del config["printerParameters"]["bedDimensions"]
if dirty:
if not "printerProfiles" in config:
config["printerProfiles"] = dict()
config["printerProfiles"]["defaultProfile"] = default_profile
return dirty
def _migrate_reverse_proxy_config(self, config):
"""
Migrates the old "server > baseUrl" and "server > scheme" configuration entries to
"server > reverseProxy > prefixFallback" and "server > reverseProxy > schemeFallback".
"""
if "server" in config.keys() and ("baseUrl" in config["server"] or "scheme" in config["server"]):
prefix = ""
if "baseUrl" in config["server"]:
prefix = config["server"]["baseUrl"]
del config["server"]["baseUrl"]
scheme = ""
if "scheme" in config["server"]:
scheme = config["server"]["scheme"]
del config["server"]["scheme"]
if not "reverseProxy" in config["server"] or not isinstance(config["server"]["reverseProxy"], dict):
config["server"]["reverseProxy"] = dict()
if prefix:
config["server"]["reverseProxy"]["prefixFallback"] = prefix
if scheme:
config["server"]["reverseProxy"]["schemeFallback"] = scheme
self._logger.info("Migrated reverse proxy configuration to new structure")
return True
else:
return False
def _migrate_event_config(self, config):
"""
Migrates the old event configuration format of type "events > gcodeCommandTrigger" and
"event > systemCommandTrigger" to the new events format.
"""
if "events" in config.keys() and ("gcodeCommandTrigger" in config["events"] or "systemCommandTrigger" in config["events"]):
self._logger.info("Migrating config (event subscriptions)...")
# migrate event hooks to new format
placeholderRe = re.compile(r"%\((.*?)\)s")
eventNameReplacements = {
"ClientOpen": "ClientOpened",
"TransferStart": "TransferStarted"
}
payloadDataReplacements = {
"Upload": {"data": "{file}", "filename": "{file}"},
"Connected": {"data": "{port} at {baudrate} baud"},
"FileSelected": {"data": "{file}", "filename": "{file}"},
"TransferStarted": {"data": "{remote}", "filename": "{remote}"},
"TransferDone": {"data": "{remote}", "filename": "{remote}"},
"ZChange": {"data": "{new}"},
"CaptureStart": {"data": "{file}"},
"CaptureDone": {"data": "{file}"},
"MovieDone": {"data": "{movie}", "filename": "{gcode}"},
"Error": {"data": "{error}"},
"PrintStarted": {"data": "{file}", "filename": "{file}"},
"PrintDone": {"data": "{file}", "filename": "{file}"},
}
def migrateEventHook(event, command):
# migrate placeholders
command = placeholderRe.sub("{__\\1}", command)
# migrate event names
if event in eventNameReplacements:
event = eventNameReplacements[event]
# migrate payloads to more specific placeholders
if event in payloadDataReplacements:
for key in payloadDataReplacements[event]:
command = command.replace("{__%s}" % key, payloadDataReplacements[event][key])
# return processed tuple
return event, command
disableSystemCommands = False
if "systemCommandTrigger" in config["events"] and "enabled" in config["events"]["systemCommandTrigger"]:
disableSystemCommands = not config["events"]["systemCommandTrigger"]["enabled"]
disableGcodeCommands = False
if "gcodeCommandTrigger" in config["events"] and "enabled" in config["events"]["gcodeCommandTrigger"]:
disableGcodeCommands = not config["events"]["gcodeCommandTrigger"]["enabled"]
disableAllCommands = disableSystemCommands and disableGcodeCommands
newEvents = {
"enabled": not disableAllCommands,
"subscriptions": []
}
if "systemCommandTrigger" in config["events"] and "subscriptions" in config["events"]["systemCommandTrigger"]:
for trigger in config["events"]["systemCommandTrigger"]["subscriptions"]:
if not ("event" in trigger and "command" in trigger):
continue
newTrigger = {"type": "system"}
if disableSystemCommands and not disableAllCommands:
newTrigger["enabled"] = False
newTrigger["event"], newTrigger["command"] = migrateEventHook(trigger["event"], trigger["command"])
newEvents["subscriptions"].append(newTrigger)
if "gcodeCommandTrigger" in config["events"] and "subscriptions" in config["events"]["gcodeCommandTrigger"]:
for trigger in config["events"]["gcodeCommandTrigger"]["subscriptions"]:
if not ("event" in trigger and "command" in trigger):
continue
newTrigger = {"type": "gcode"}
if disableGcodeCommands and not disableAllCommands:
newTrigger["enabled"] = False
newTrigger["event"], newTrigger["command"] = migrateEventHook(trigger["event"], trigger["command"])
newTrigger["command"] = newTrigger["command"].split(",")
newEvents["subscriptions"].append(newTrigger)
config["events"] = newEvents
self._logger.info("Migrated %d event subscriptions to new format and structure" % len(newEvents["subscriptions"]))
return True
else:
return False
def _migrate_core_system_commands(self, config):
"""
Migrates system commands for restart, reboot and shutdown as defined on OctoPi or
according to the official setup guide to new core system commands to remove
duplication.
If the server command for an action is not yet set, the command is migrated. Otherwise the
definition is only deleted from the custom system commands.
"""
changed = False
migration_map = dict(shutdown="systemShutdownCommand",
reboot="systemRestartCommand",
restart="serverRestartCommand")
if "system" in config and "actions" in config["system"]:
actions = config["system"]["actions"]
to_delete = []
for index, spec in enumerate(actions):
action = spec.get("action")
command = spec.get("command")
if action is None or command is None:
continue
migrate_to = migration_map.get(action)
if migrate_to is not None:
if not "server" in config or not "commands" in config["server"] or not migrate_to in config["server"]["commands"]:
if not "server" in config:
config["server"] = dict()
if not "commands" in config["server"]:
config["server"]["commands"] = dict()
config["server"]["commands"][migrate_to] = command
self._logger.info("Migrated {} action to server.commands.{}".format(action, migrate_to))
to_delete.append(index)
self._logger.info("Deleting {} action from configured system commands, superseeded by server.commands.{}".format(action, migrate_to))
for index in reversed(to_delete):
actions.pop(index)
changed = True
if changed:
# let's make a backup of our current config, in case someone wants to roll back to an
# earlier version and needs to recover the former system commands for that
backup_path = self.backup("system_command_migration")
self._logger.info("Made a copy of the current config at {} to allow recovery of manual system command configuration".format(backup_path))
return changed
def backup(self, suffix, path=None):
import shutil
if path is None:
path = os.path.dirname(self._configfile)
basename = os.path.basename(self._configfile)
name, ext = os.path.splitext(basename)
backup = os.path.join(path, "{}.{}{}".format(name, suffix, ext))
shutil.copy(self._configfile, backup)
return backup
def save(self, force=False):
if not self._dirty and not force:
return False
from octoprint.util import atomic_write
try:
with atomic_write(self._configfile, "wb", prefix="octoprint-config-", suffix=".yaml", permissions=0o600, max_permissions=0o666) as configFile:
yaml.safe_dump(self._config, configFile, default_flow_style=False, indent=" ", allow_unicode=True)
self._dirty = False
except:
self._logger.exception("Error while saving config.yaml!")
raise
else:
self.load()
return True
##~~ Internal getter
def _get_by_path(self, path, config):
current = config
for key in path:
if key not in current:
raise NoSuchSettingsPath()
current = current[key]
return current
def _get_value(self, path, asdict=False, config=None, defaults=None, preprocessors=None, merged=False, incl_defaults=True, do_copy=True):
if not path:
raise NoSuchSettingsPath()
if config is not None or defaults is not None:
if config is None:
config = self._config
if defaults is None:
defaults = dict(self._map.parents)
# mappings: provided config + any intermediary parents + provided defaults + regular defaults
mappings = [config] + self._overlay_maps + [defaults, self._default_map]
chain = HierarchicalChainMap(*mappings)
else:
chain = self._map
if preprocessors is None:
preprocessors = self._get_preprocessors
preprocessor = None
try:
preprocessor = self._get_by_path(path, preprocessors)
except NoSuchSettingsPath:
pass
parent_path = path[:-1]
last = path[-1]
if not isinstance(last, (list, tuple)):
keys = [last]
else:
keys = last
if asdict:
results = dict()
else:
results = list()
for key in keys:
try:
value = chain.get_by_path(parent_path + [key], only_local=not incl_defaults)
except KeyError:
raise NoSuchSettingsPath()
if isinstance(value, dict) and merged:
try:
default_value = chain.get_by_path(parent_path + [key], only_defaults=True)
if default_value is not None:
value = dict_merge(default_value, value)
except KeyError:
raise NoSuchSettingsPath()
if preprocessors is not None:
try:
preprocessor = self._get_by_path(path, preprocessors)
except:
pass
if callable(preprocessor):
value = preprocessor(value)
if do_copy:
value = copy.deepcopy(value)
if asdict:
results[key] = value
else:
results.append(value)
if not isinstance(last, (list, tuple)):
if asdict:
return results.values().pop()
else:
return results.pop()
else:
return results
#~~ has
def has(self, path, **kwargs):
try:
self._get_value(path, **kwargs)
except NoSuchSettingsPath:
return False
else:
return True
#~~ getter
def get(self, path, **kwargs):
error_on_path = kwargs.get("error_on_path", False)
new_kwargs = dict(kwargs)
if "error_on_path" in new_kwargs:
del new_kwargs["error_on_path"]
try:
return self._get_value(path, **new_kwargs)
except NoSuchSettingsPath:
if error_on_path:
raise
return None
def getInt(self, path, **kwargs):
value = self.get(path, **kwargs)
if value is None:
return None
try:
return int(value)
except ValueError:
self._logger.warn("Could not convert %r to a valid integer when getting option %r" % (value, path))
return None
def getFloat(self, path, **kwargs):
value = self.get(path, **kwargs)
if value is None:
return None
try:
return float(value)
except ValueError:
self._logger.warn("Could not convert %r to a valid integer when getting option %r" % (value, path))
return None
def getBoolean(self, path, **kwargs):
value = self.get(path, **kwargs)
if value is None:
return None
if isinstance(value, bool):
return value
if isinstance(value, (int, float)):
return value != 0
if isinstance(value, (str, unicode)):
return value.lower() in valid_boolean_trues
return value is not None
def getBaseFolder(self, type, create=True):
if type not in default_settings["folder"].keys() + ["base"]:
return None
if type == "base":
return self._basedir
folder = self.get(["folder", type])
if folder is None:
folder = self._get_default_folder(type)
if not os.path.isdir(folder):
if create:
os.makedirs(folder)
else:
raise IOError("No such folder: {folder}".format(folder=folder))
return folder
def listScripts(self, script_type):
return map(lambda x: x[len(script_type + "/"):], filter(lambda x: x.startswith(script_type + "/"), self._get_scripts(script_type)))
def loadScript(self, script_type, name, context=None, source=False):
if context is None:
context = dict()
context.update(dict(script=dict(type=script_type, name=name)))
template = self._get_script_template(script_type, name, source=source)
if template is None:
return None
if source:
script = template
else:
try:
script = template.render(**context)
except:
self._logger.exception("Exception while trying to render script {script_type}:{name}".format(**locals()))
return None
return script
#~~ remove
def remove(self, path, config=None, error_on_path=False):
if not path:
if error_on_path:
raise NoSuchSettingsPath()
return
if config is not None:
mappings = [config] + self._overlay_maps + [self._default_map]
chain = HierarchicalChainMap(*mappings)
else:
chain = self._map
try:
chain.del_by_path(path)
self._dirty = True
self._dirty_time = time.time()
except KeyError:
if error_on_path:
raise NoSuchSettingsPath()
pass
#~~ setter
def set(self, path, value, force=False, defaults=None, config=None, preprocessors=None, error_on_path=False):
if not path:
if error_on_path:
raise NoSuchSettingsPath()
return
if self._mtime is not None and self.last_modified != self._mtime:
self.load()
if config is not None or defaults is not None:
if config is None:
config = self._config
if defaults is None:
defaults = dict(self._map.parents)
chain = HierarchicalChainMap(config, defaults)
else:
chain = self._map
if preprocessors is None:
preprocessors = self._set_preprocessors
preprocessor = None
try:
preprocessor = self._get_by_path(path, preprocessors)
except NoSuchSettingsPath:
pass
if callable(preprocessor):
value = preprocessor(value)
try:
current = chain.get_by_path(path)
except KeyError:
current = None
try:
default_value = chain.get_by_path(path, only_defaults=True)
except KeyError:
if error_on_path:
raise NoSuchSettingsPath()
default_value = None
in_local = chain.has_path(path, only_local=True)
in_defaults = chain.has_path(path, only_defaults=True)
if not force and in_defaults and in_local and default_value == value:
try:
chain.del_by_path(path)
self._dirty = True
self._dirty_time = time.time()
except KeyError:
if error_on_path:
raise NoSuchSettingsPath()
pass
elif force or (not in_local and in_defaults and default_value != value) or (in_local and current != value):
if value is None and in_local:
chain.del_by_path(path)
else:
chain.set_by_path(path, value)
self._dirty = True
self._dirty_time = time.time()
def setInt(self, path, value, **kwargs):
if value is None:
self.set(path, None, **kwargs)
return
try:
intValue = int(value)
except ValueError:
self._logger.warn("Could not convert %r to a valid integer when setting option %r" % (value, path))
return
self.set(path, intValue, **kwargs)
def setFloat(self, path, value, **kwargs):
if value is None:
self.set(path, None, **kwargs)
return
try:
floatValue = float(value)
except ValueError:
self._logger.warn("Could not convert %r to a valid integer when setting option %r" % (value, path))
return
self.set(path, floatValue, **kwargs)
def setBoolean(self, path, value, **kwargs):
if value is None or isinstance(value, bool):
self.set(path, value, **kwargs)
elif isinstance(value, basestring) and value.lower() in valid_boolean_trues:
self.set(path, True, **kwargs)
else:
self.set(path, False, **kwargs)
def setBaseFolder(self, type, path, force=False):
if type not in default_settings["folder"].keys():
return None
currentPath = self.getBaseFolder(type)
defaultPath = self._get_default_folder(type)
if (path is None or path == defaultPath) and "folder" in self._config.keys() and type in self._config["folder"].keys():
del self._config["folder"][type]
if not self._config["folder"]:
del self._config["folder"]
self._dirty = True
self._dirty_time = time.time()
elif (path != currentPath and path != defaultPath) or force:
if not "folder" in self._config.keys():
self._config["folder"] = {}
self._config["folder"][type] = path
self._dirty = True
self._dirty_time = time.time()
def saveScript(self, script_type, name, script):
script_folder = self.getBaseFolder("scripts")
filename = os.path.realpath(os.path.join(script_folder, script_type, name))
if not filename.startswith(os.path.realpath(script_folder)):
# oops, jail break, that shouldn't happen
raise ValueError("Invalid script path to save to: {filename} (from {script_type}:{name})".format(**locals()))
path, _ = os.path.split(filename)
if not os.path.exists(path):
os.makedirs(path)
with atomic_write(filename, "wb", max_permissions=0o666) as f:
f.write(script)
def _default_basedir(applicationName):
# taken from http://stackoverflow.com/questions/1084697/how-do-i-store-desktop-application-data-in-a-cross-platform-way-for-python
if sys.platform == "darwin":
import appdirs
return appdirs.user_data_dir(applicationName, "")
elif sys.platform == "win32":
return os.path.join(os.environ["APPDATA"], applicationName)
else:
return os.path.expanduser(os.path.join("~", "." + applicationName.lower()))
| agpl-3.0 | -6,578,733,404,892,013,000 | 28.695162 | 159 | 0.654207 | false |
ubuntu-core/snapcraft | snapcraft/internal/sources/_base.py | 1 | 5773 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import requests
import shutil
import subprocess
import sys
import snapcraft.internal.common
from snapcraft.internal.cache import FileCache
from snapcraft.internal.indicators import (
download_requests_stream,
download_urllib_source,
)
from ._checksum import split_checksum, verify_checksum
from . import errors
class Base:
def __init__(
self,
source,
source_dir,
source_tag=None,
source_commit=None,
source_branch=None,
source_depth=None,
source_checksum=None,
command=None,
):
self.source = source
self.source_dir = source_dir
self.source_tag = source_tag
self.source_commit = source_commit
self.source_branch = source_branch
self.source_depth = source_depth
self.source_checksum = source_checksum
self.source_details = None
self.command = command
self._checked = False
def check(self, target: str):
"""Check if pulled sources have changed since target was created.
:param str target: Path to target file.
"""
self._checked = True
return self._check(target)
def update(self):
"""Update pulled source.
:raises RuntimeError: If this function is called before `check()`.
"""
if not self._checked:
# This is programmer error
raise RuntimeError("source must be checked before it's updated")
self._update()
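# Illustrative call order for a concrete subclass (names and paths below are hypothetical):
#
#     source = SomeConcreteSource("http://example.com/foo.tar.gz", "parts/foo/src")
#     if source.check(target="parts/foo/state/pull"):
#         source.update()
#
# update() refuses to run unless check() has been called first (see above).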
def _check(self, target: str):
"""Check if pulled sources have changed since target was created.
:param str target: Path to target file.
"""
raise errors.SourceUpdateUnsupportedError(self)
def _update(self):
"""Update pulled source."""
raise errors.SourceUpdateUnsupportedError(self)
def _run(self, command, **kwargs):
try:
subprocess.check_call(command, **kwargs)
except subprocess.CalledProcessError as e:
raise errors.SnapcraftPullError(command, e.returncode)
def _run_output(self, command, **kwargs):
try:
return (
subprocess.check_output(command, **kwargs)
.decode(sys.getfilesystemencoding())
.strip()
)
except subprocess.CalledProcessError as e:
raise errors.SnapcraftPullError(command, e.returncode)
class FileBase(Base):
def pull(self):
source_file = None
is_source_url = snapcraft.internal.common.isurl(self.source)
# First check if it is a url and download and if not
# it is probably locally referenced.
if is_source_url:
source_file = self.download()
else:
basename = os.path.basename(self.source)
source_file = os.path.join(self.source_dir, basename)
# We make this copy as the provisioning logic can delete
# this file and we don't want that.
try:
shutil.copy2(self.source, source_file)
except FileNotFoundError as exc:
raise errors.SnapcraftSourceNotFoundError(self.source) from exc
# Verify before provisioning
if self.source_checksum:
verify_checksum(self.source_checksum, source_file)
# We finally provision, but we don't clean the target so override-pull
# can actually have meaning when using these sources.
self.provision(self.source_dir, src=source_file, clean_target=False)
def download(self, filepath: str = None) -> str:
if filepath is None:
self.file = os.path.join(self.source_dir, os.path.basename(self.source))
else:
self.file = filepath
# First check if we already have the source file cached.
file_cache = FileCache()
if self.source_checksum:
algorithm, hash = split_checksum(self.source_checksum)
cache_file = file_cache.get(algorithm=algorithm, hash=hash)
if cache_file:
# We make this copy as the provisioning logic can delete
# this file and we don't want that.
shutil.copy2(cache_file, self.file)
return self.file
# If not we download and store
if snapcraft.internal.common.get_url_scheme(self.source) == "ftp":
download_urllib_source(self.source, self.file)
else:
try:
request = requests.get(self.source, stream=True, allow_redirects=True)
request.raise_for_status()
except requests.exceptions.RequestException as e:
raise errors.SnapcraftRequestError(message=e)
download_requests_stream(request, self.file)
# We verify the file if source_checksum is defined
# and we cache the file for future reuse.
if self.source_checksum:
algorithm, digest = verify_checksum(self.source_checksum, self.file)
file_cache.cache(filename=self.file, algorithm=algorithm, hash=digest)
return self.file
| gpl-3.0 | -2,380,685,263,678,600,000 | 34.417178 | 86 | 0.63208 | false |
looopTools/sw9-source | .waf-1.9.8-6657823688b736c1d1a4e2c4e8e198b4/waflib/Tools/waf_unit_test.py | 1 | 5372 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys
from waflib.TaskGen import feature,after_method,taskgen_method
from waflib import Utils,Task,Logs,Options
from waflib.Tools import ccroot
testlock=Utils.threading.Lock()
SCRIPT_TEMPLATE="""#! %(python)s
import subprocess, sys
cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""
@feature('test')
@after_method('apply_link','process_use')
def make_test(self):
if not getattr(self,'link_task',None):
return
tsk=self.create_task('utest',self.link_task.outputs)
if getattr(self,'ut_str',None):
self.ut_run,lst=Task.compile_fun(self.ut_str,shell=getattr(self,'ut_shell',False))
tsk.vars=lst+tsk.vars
if getattr(self,'ut_cwd',None):
if isinstance(self.ut_cwd,str):
if os.path.isabs(self.ut_cwd):
self.ut_cwd=self.bld.root.make_node(self.ut_cwd)
else:
self.ut_cwd=self.path.make_node(self.ut_cwd)
else:
self.ut_cwd=tsk.inputs[0].parent
if not hasattr(self,'ut_paths'):
paths=[]
for x in self.tmp_use_sorted:
try:
y=self.bld.get_tgen_by_name(x).link_task
except AttributeError:
pass
else:
if not isinstance(y,ccroot.stlink_task):
paths.append(y.outputs[0].parent.abspath())
self.ut_paths=os.pathsep.join(paths)+os.pathsep
if not hasattr(self,'ut_env'):
self.ut_env=dct=dict(os.environ)
def add_path(var):
dct[var]=self.ut_paths+dct.get(var,'')
if Utils.is_win32:
add_path('PATH')
elif Utils.unversioned_sys_platform()=='darwin':
add_path('DYLD_LIBRARY_PATH')
add_path('LD_LIBRARY_PATH')
else:
add_path('LD_LIBRARY_PATH')
@taskgen_method
def add_test_results(self,tup):
Logs.debug("ut: %r",tup)
self.utest_result=tup
try:
self.bld.utest_results.append(tup)
except AttributeError:
self.bld.utest_results=[tup]
class utest(Task.Task):
color='PINK'
after=['vnum','inst']
vars=[]
def runnable_status(self):
if getattr(Options.options,'no_tests',False):
return Task.SKIP_ME
ret=super(utest,self).runnable_status()
if ret==Task.SKIP_ME:
if getattr(Options.options,'all_tests',False):
return Task.RUN_ME
return ret
def get_test_env(self):
return self.generator.ut_env
def post_run(self):
super(utest,self).post_run()
if getattr(Options.options,'clear_failed_tests',False)and self.waf_unit_test_results[1]:
self.generator.bld.task_sigs[self.uid()]=None
def run(self):
if hasattr(self.generator,'ut_run'):
return self.generator.ut_run(self)
self.ut_exec=getattr(self.generator,'ut_exec',[self.inputs[0].abspath()])
if getattr(self.generator,'ut_fun',None):
self.generator.ut_fun(self)
testcmd=getattr(self.generator,'ut_cmd',False)or getattr(Options.options,'testcmd',False)
if testcmd:
self.ut_exec=(testcmd%' '.join(self.ut_exec)).split(' ')
return self.exec_command(self.ut_exec)
def exec_command(self,cmd,**kw):
Logs.debug('runner: %r',cmd)
if getattr(Options.options,'dump_test_scripts',False):
global SCRIPT_TEMPLATE
script_code=SCRIPT_TEMPLATE%{'python':sys.executable,'env':self.get_test_env(),'cwd':self.get_cwd().abspath(),'cmd':cmd}
script_file=self.inputs[0].abspath()+'_run.py'
Utils.writef(script_file,script_code)
os.chmod(script_file,Utils.O755)
if Logs.verbose>1:
Logs.info('Test debug file written as %r'%script_file)
proc=Utils.subprocess.Popen(cmd,cwd=self.get_cwd().abspath(),env=self.get_test_env(),stderr=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE)
(stdout,stderr)=proc.communicate()
self.waf_unit_test_results=tup=(self.inputs[0].abspath(),proc.returncode,stdout,stderr)
testlock.acquire()
try:
return self.generator.add_test_results(tup)
finally:
testlock.release()
def get_cwd(self):
return self.generator.ut_cwd
def summary(bld):
lst=getattr(bld,'utest_results',[])
if lst:
Logs.pprint('CYAN','execution summary')
total=len(lst)
tfail=len([x for x in lst if x[1]])
Logs.pprint('CYAN',' tests that pass %d/%d'%(total-tfail,total))
for(f,code,out,err)in lst:
if not code:
Logs.pprint('CYAN',' %s'%f)
Logs.pprint('CYAN',' tests that fail %d/%d'%(tfail,total))
for(f,code,out,err)in lst:
if code:
Logs.pprint('CYAN',' %s'%f)
def set_exit_code(bld):
lst=getattr(bld,'utest_results',[])
for(f,code,out,err)in lst:
if code:
msg=[]
if out:
msg.append('stdout:%s%s'%(os.linesep,out.decode('utf-8')))
if err:
msg.append('stderr:%s%s'%(os.linesep,err.decode('utf-8')))
bld.fatal(os.linesep.join(msg))
def options(opt):
opt.add_option('--notests',action='store_true',default=False,help='Exec no unit tests',dest='no_tests')
opt.add_option('--alltests',action='store_true',default=False,help='Exec all unit tests',dest='all_tests')
opt.add_option('--clear-failed',action='store_true',default=False,help='Force failed unit tests to run again next time',dest='clear_failed_tests')
opt.add_option('--testcmd',action='store',default=False,help='Run the unit tests using the test-cmd string'' example "--test-cmd="valgrind --error-exitcode=1'' %s" to run under valgrind',dest='testcmd')
opt.add_option('--dump-test-scripts',action='store_true',default=False,help='Create python scripts to help debug tests',dest='dump_test_scripts')
| mit | -4,228,622,893,590,671,400 | 36.566434 | 203 | 0.698436 | false |
Strubbl/matekate | refresh.py | 1 | 3413 | #!/usr/bin/python
import cgi
import logging
import urllib2
import simplejson
import os
import sys
try:
loglevel = sys.argv[1]
except IndexError:
loglevel = None
if loglevel == '-d':
logging.basicConfig(level=logging.DEBUG)
logging.debug('set logging lvl to debug')
scriptdir = os.path.dirname(os.path.abspath(__file__))
f = urllib2.urlopen('http://overpass-api.de/api/interpreter?data=[out:json];(node["drink:club-mate"~"."];>;way["drink:club-mate"~"."];>;);out;')
try:
json = simplejson.load(f)
except simplejson.JSONDecodeError, e:
print(e)
sys.exit(1)
f.close()
nodes = {}
counter = 0
with open(scriptdir + '/js/club-mate-data.js', 'w') as f:
logging.debug('enter file loop')
f.write('function mate_locations_populate(markers) {\n')
for e in json['elements']:
ide = e['id']
lat = e.get('lat', None)
lon = e.get('lon', None)
typ = e['type']
tags = e.get('tags', {})
logging.debug('Element id=%s type=%s tags=%s', ide, typ, tags)
for k in tags.keys():
tags[k] = cgi.escape(tags[k]).replace('"', '\\"')
if typ == 'node':
nodes[ide] = (lat,lon)
if typ == 'way':
lat, lon = nodes[e['nodes'][0]] # extract coordinate of first node
logging.debug('Element id=%s lat=%s lon=%s', ide, lat, lon)
if not lat or not lon:
logging.warn('Element id=%s has missing lat=%s or lon=%s', ide, lat, lon)
if 'name' in tags:
name = tags['name']
else:
name = '%s %s' % (typ, ide)
if tags.get('drink:club-mate') == None:
logging.debug('This node has no tag drink:club-mate at all')
continue
elif tags.get('drink:club-mate') == 'retail':
icon = "icon_retail"
elif tags.get('drink:club-mate') == 'served':
icon = "icon_served"
else:
icon = "icon_normal"
popup = '<b>%s</b> <a href=\\"http://openstreetmap.org/browse/%s/%s\\" target=\\"_blank\\">*</a><hr/>' % (name, typ, ide)
if 'addr:street' in tags:
popup += '%s %s<br/>' % (tags.get('addr:street', ''), tags.get('addr:housenumber', ''))
if 'addr:city' in tags:
popup += '%s %s<br/>' % (tags.get('addr:postcode', ''), tags.get('addr:city', ''))
if 'addr:country' in tags:
popup += '%s<br/>' % (tags.get('addr:country', ''))
popup += '<hr/>'
if 'opening_hours' in tags:
popup += 'opening hours: %s<br/>' % (tags['opening_hours'])
if 'contact:website' in tags:
popup += 'website: <a href=\\"%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['contact:website'], tags['contact:website'])
elif 'website' in tags:
popup += 'website: <a href=\\"%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['website'], tags['website'])
if 'contact:email' in tags:
popup += 'email: <a href=\\"mailto:%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['contact:email'], tags['contact:email'])
elif 'email' in tags:
popup += 'email: <a href=\\"mailto:%s\\" target=\\"_blank\\">%s</a><br/>' % (tags['email'], tags['email'])
if 'contact:phone' in tags:
popup += 'phone: %s<br/>' % (tags['contact:phone'])
elif 'phone' in tags:
popup += 'phone: %s<br/>' % (tags['phone'])
f.write(' markers.addLayer(L.marker([%s, %s], {"title": "%s", "icon": %s}).bindPopup("%s"));\n' % (lat, lon, name.encode('utf-8'), icon, popup.encode('utf-8')))
counter += 1
f.write('}\n')
logging.info('added %i elements to data file', counter)
sys.exit(0)
| gpl-3.0 | 2,305,478,928,946,069,800 | 33.13 | 165 | 0.572224 | false |
IBM-Security/ibmsecurity | ibmsecurity/isam/base/cluster/trace.py | 1 | 1483 | import logging
logger = logging.getLogger(__name__)
requires_model="Appliance"
try:
basestring
except NameError:
basestring = (str, bytes)
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve the tracing levels
"""
return isamAppliance.invoke_get("Retrieve the tracing levels",
"/isam/cluster/tracing/v1", requires_model=requires_model)
def _check(isamAppliance, dsc):
check_value,warnings = True,""
ret_obj = get(isamAppliance)
warnings = ret_obj['warnings']
if isinstance(dsc, basestring):
import ast
dsc = ast.literal_eval(dsc)
if 'dsc' in ret_obj['data']:
check_value = (ret_obj['data']['dsc']==dsc)
return check_value,warnings
else:
check_value=True
return check_value,warnings
def set(isamAppliance, dsc, check_mode=False, force=False):
"""
Updating the tracing levels
"""
check_value,warnings = _check(isamAppliance, dsc)
if force is True or check_value is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put(
"Updating the tracing levels",
"/isam/cluster/tracing/v1",
{
'dsc': dsc
}, requires_model=requires_model)
return isamAppliance.create_return_object(warnings=warnings)
| apache-2.0 | -4,713,544,688,860,106,000 | 25.963636 | 94 | 0.612272 | false |
gov-cjwaszczuk/notifications-admin | app/notify_client/notification_api_client.py | 1 | 2954 | from app.notify_client import _attach_current_user, NotifyAdminAPIClient
class NotificationApiClient(NotifyAdminAPIClient):
def __init__(self):
super().__init__("a" * 73, "b")
def init_app(self, app):
self.base_url = app.config['API_HOST_NAME']
self.service_id = app.config['ADMIN_CLIENT_USER_NAME']
self.api_key = app.config['ADMIN_CLIENT_SECRET']
def get_notifications_for_service(
self,
service_id,
job_id=None,
template_type=None,
status=None,
page=None,
page_size=None,
limit_days=None,
include_jobs=None,
include_from_test_key=None,
format_for_csv=None,
to=None,
):
params = {}
if page is not None:
params['page'] = page
if page_size is not None:
params['page_size'] = page_size
if template_type is not None:
params['template_type'] = template_type
if status is not None:
params['status'] = status
if include_jobs is not None:
params['include_jobs'] = include_jobs
if include_from_test_key is not None:
params['include_from_test_key'] = include_from_test_key
if format_for_csv is not None:
params['format_for_csv'] = format_for_csv
if to is not None:
params['to'] = to
if job_id:
return self.get(
url='/service/{}/job/{}/notifications'.format(service_id, job_id),
params=params
)
else:
if limit_days is not None:
params['limit_days'] = limit_days
return self.get(
url='/service/{}/notifications'.format(service_id),
params=params
)
def send_notification(self, service_id, *, template_id, recipient, personalisation, sender_id):
data = {
'template_id': template_id,
'to': recipient,
'personalisation': personalisation,
}
if sender_id:
data['sender_id'] = sender_id
data = _attach_current_user(data)
return self.post(url='/service/{}/send-notification'.format(service_id), data=data)
def get_notification(self, service_id, notification_id):
return self.get(url='/service/{}/notifications/{}'.format(service_id, notification_id))
def get_api_notifications_for_service(self, service_id):
ret = self.get_notifications_for_service(service_id, include_jobs=False, include_from_test_key=True)
return self.map_letters_to_accepted(ret)
@staticmethod
def map_letters_to_accepted(notifications):
for notification in notifications['notifications']:
if notification['notification_type'] == 'letter' and notification['status'] in ('created', 'sending'):
notification['status'] = 'accepted'
return notifications
| mit | 5,493,466,503,666,718,000 | 35.925 | 114 | 0.578876 | false |
rdezavalia/ansible | lib/ansible/module_utils/facts.py | 1 | 138802 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import shlex
import errno
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
try:
# python2
import ConfigParser as configparser
except ImportError:
# python3
import configparser
from ansible.module_utils.basic import get_all_subclasses
# py2 vs py3; replace with six via ziploader
try:
# python2
from StringIO import StringIO
except ImportError:
# python3
from io import StringIO
try:
# python2
from string import maketrans
except ImportError:
# python3
maketrans = str.maketrans # TODO: is this really identical?
try:
dict.iteritems
except AttributeError:
# Python 3
def iteritems(d):
return d.items()
else:
# Python 2
def iteritems(d):
return d.iteritems()
try:
# Python 2
long
except NameError:
# Python 3
long = int
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
# Check if we have SSLContext support
from ssl import create_default_context, SSLContext
del create_default_context
del SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
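# Illustrative usage (hypothetical function, not part of this module):
#
#     @timeout(seconds=30, error_message="hardware fact gathering timed out")
#     def get_slow_facts():
#         ...
#
# If the wrapped call runs longer than the limit, SIGALRM fires and TimeoutError is raised.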
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
]
def __init__(self, module, load_on_init=True, cached_facts=None):
self.module = module
if not cached_facts:
self.facts = {}
else:
self.facts = cached_facts
### TODO: Eventually, these should all get moved to populate(). But
# some of the values are currently being used by other subclasses (for
# instance, os_family and distribution). Have to sort out what to do
# about those first.
if load_on_init:
self.get_platform_facts()
self.facts.update(Distribution(module).populate())
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_caps_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_service_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
self.get_dns_facts()
self.get_python_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'AIX':
# Attempt to use getconf to figure out architecture
# fall back to bootinfo if needed
getconf_bin = self.module.get_bin_path('getconf')
if getconf_bin:
rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
data = out.split('\n')
self.facts['architecture'] = data[0]
else:
bootinfo_bin = self.module.get_bin_path('bootinfo')
rc, out, err = self.module.run_command([bootinfo_bin, '-p'])
data = out.split('\n')
self.facts['architecture'] = data[0]
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
if machine_id:
machine_id = machine_id.split('\n')[0]
self.facts["machine_id"] = machine_id
def get_local_facts(self):
fact_path = self.module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = self.module.run_command(fn)
else:
out = get_file_content(fn, default='')
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError:
# load raw ini
cp = configparser.ConfigParser()
try:
cp.readfp(StringIO(out))
except configparser.Error:
fact = "error loading fact - please check content"
else:
fact = {}
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
# list of directories to check for ssh keys
# used in the order listed here, the first one with keys is used
keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
for keydir in keydirs:
for type_ in keytypes:
factname = 'ssh_host_key_%s_public' % type_
if factname in self.facts:
# a previous keydir was already successful, stop looking
# for keys
return
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
self.facts[factname] = keydata.split()[1]
def get_pkg_mgr_facts(self):
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
else:
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
def get_service_mgr_facts(self):
#TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc
# other OSes besides Linux might need to check across several possible candidates
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
else:
proc_1 = os.path.basename(proc_1)
if proc_1 is not None:
proc_1 = proc_1.strip()
if proc_1 == 'init' or proc_1.endswith('sh'):
# many systems return init, so this cannot be trusted; if it ends in 'sh' it probably is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
self.facts['service_mgr'] = proc_1
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
#FIXME: find way to query executable, version matching is not ideal
if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
self.facts['service_mgr'] = 'launchd'
else:
self.facts['service_mgr'] = 'systemstarter'
elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
#FIXME: we might want to break out to individual BSDs
self.facts['service_mgr'] = 'bsdinit'
elif self.facts['system'] == 'AIX':
self.facts['service_mgr'] = 'src'
elif self.facts['system'] == 'SunOS':
#FIXME: smf?
self.facts['service_mgr'] = 'svcs'
elif self.facts['system'] == 'Linux':
if self.is_systemd_managed():
self.facts['service_mgr'] = 'systemd'
elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
self.facts['service_mgr'] = 'upstart'
elif os.path.realpath('/sbin/rc') == '/sbin/openrc':
self.facts['service_mgr'] = 'openrc'
elif os.path.exists('/etc/init.d/'):
self.facts['service_mgr'] = 'sysvinit'
if not self.facts.get('service_mgr', False):
# if we cannot detect, fallback to generic 'service'
self.facts['service_mgr'] = 'service'
def get_lsb_facts(self):
lsb_path = self.module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = self.module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
for line in get_file_lines('/etc/lsb-release'):
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError:
self.facts['selinux']['type'] = 'unknown'
def get_caps_facts(self):
capsh_path = self.module.get_bin_path('capsh')
if capsh_path:
rc, out, err = self.module.run_command([capsh_path, "--print"])
enforced_caps = []
enforced = 'NA'
for line in out.split('\n'):
if len(line) < 1:
continue
if line.startswith('Current:'):
if line.split(':')[1].strip() == '=ep':
enforced = 'False'
else:
enforced = 'True'
enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
self.facts['system_capabilities_enforced'] = enforced
self.facts['system_capabilities'] = enforced_caps
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['weekday_number'] = now.strftime('%w')
self.facts['date_time']['weeknumber'] = now.strftime('%W')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
def is_systemd_managed(self):
# tools must be installed
if self.module.get_bin_path('systemctl'):
            # this should show if systemd is the boot init system, even if checking init failed to mark it as systemd
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
def get_env_facts(self):
self.facts['env'] = {}
for k,v in iteritems(os.environ):
self.facts['env'][k] = v
def get_dns_facts(self):
self.facts['dns'] = {}
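        # Typical /etc/resolv.conf lines handled below (illustrative):
        #   nameserver 192.0.2.53
        #   search example.com example.net
        #   options ndots:2 timeout:3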
for line in get_file_content('/etc/resolv.conf', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
                if 'nameservers' not in self.facts['dns']:
self.facts['dns']['nameservers'] = []
for nameserver in tokens[1:]:
self.facts['dns']['nameservers'].append(nameserver)
elif tokens[0] == 'domain':
if len(tokens) > 1:
self.facts['dns']['domain'] = tokens[1]
elif tokens[0] == 'search':
self.facts['dns']['search'] = []
for suffix in tokens[1:]:
self.facts['dns']['search'].append(suffix)
elif tokens[0] == 'sortlist':
self.facts['dns']['sortlist'] = []
for address in tokens[1:]:
self.facts['dns']['sortlist'].append(address)
elif tokens[0] == 'options':
self.facts['dns']['options'] = {}
if len(tokens) > 1:
for option in tokens[1:]:
option_tokens = option.split(':', 1)
if len(option_tokens) == 0:
continue
val = len(option_tokens) == 2 and option_tokens[1] or True
self.facts['dns']['options'][option_tokens[0]] = val
def _get_mount_size_facts(self, mountpoint):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(mountpoint)
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError:
pass
return size_total, size_available
def get_python_facts(self):
self.facts['python'] = {
'version': {
'major': sys.version_info[0],
'minor': sys.version_info[1],
'micro': sys.version_info[2],
'releaselevel': sys.version_info[3],
'serial': sys.version_info[4]
},
'version_info': list(sys.version_info),
'executable': sys.executable,
'has_sslcontext': HAS_SSLCONTEXT
}
try:
self.facts['python']['type'] = sys.subversion[0]
except AttributeError:
self.facts['python']['type'] = None
class Distribution(object):
"""
    This class fills in the distribution, distribution_version and distribution_release variables
    To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
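    # Example of the detection flow (illustrative): if /etc/redhat-release exists and
    # contains "CentOS Linux release 7.2.1511 (Core)", the first word "CentOS" becomes
    # the distribution, while version/release stay as reported by platform.dist().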
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/release', 'name': 'Solaris'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SuSE'},
{'path': '/etc/SuSE-release', 'name': 'SuSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/os-release', 'name': 'NA'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
}
    # A mapping of distribution names to their OS family
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse'
)
def __init__(self, module):
self.system = platform.system()
self.facts = {}
self.module = module
def populate(self):
self.get_distribution_facts()
return self.facts
def get_distribution_facts(self):
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
self.facts['distribution'] = self.system
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'OpenBSD')
self.facts['distribution'] = self.system
if self.system in systems_implemented:
cleanedname = self.system.replace('-','')
distfunc = getattr(self, 'get_distribution_'+cleanedname)
distfunc()
elif self.system == 'Linux':
# try to find out which linux distribution this is
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
if not os.path.exists(path):
continue
                # if allowempty is set, we only check for file existence but not content
if 'allowempty' in ddict and ddict['allowempty']:
self.facts['distribution'] = name
break
if os.path.getsize(path) == 0:
continue
data = get_file_content(path)
if name in self.SEARCH_STRING:
                    # look for the distribution's SEARCH_STRING value in the data
# only the distribution name is set, the version is assumed to be correct from platform.dist()
if self.SEARCH_STRING[name] in data:
# this sets distribution=RedHat if 'Red Hat' shows up in data
self.facts['distribution'] = name
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
self.facts['distribution'] = data.split()[0]
break
else:
# call a dedicated function for parsing the file content
try:
distfunc = getattr(self, 'get_distribution_' + name)
parsed = distfunc(name, data, path)
if parsed is None or parsed:
                            # distfunc returns False if parsing failed
                            # break only if parsing was successful
                            # otherwise continue with other distributions
break
except AttributeError:
                        # this should never happen, but if it does, fail quietly and not with a traceback
pass
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
self.facts['os_family'] = self.facts['distribution']
distro = self.facts['distribution'].replace(' ', '_')
if distro in self.OS_FAMILY:
self.facts['os_family'] = self.OS_FAMILY[distro]
def get_distribution_AIX(self):
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
def get_distribution_HPUX(self):
rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
def get_distribution_Darwin(self):
self.facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
def get_distribution_OpenBSD(self):
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
def get_distribution_Slackware(self, name, data, path):
if 'Slackware' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.findall('\w+[.]\w+', data)
if version:
self.facts['distribution_version'] = version[0]
def get_distribution_Amazon(self, name, data, path):
if 'Amazon' not in data:
return False # TODO: remove
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
def get_distribution_OpenWrt(self, name, data, path):
if 'OpenWrt' not in data:
return False # TODO: remove
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
def get_distribution_Alpine(self, name, data, path):
self.facts['distribution'] = 'Alpine'
self.facts['distribution_version'] = data
def get_distribution_Solaris(self, name, data, path):
data = data.split('\n')[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
return
uname_rc, uname_out, uname_err = self.module.run_command(['uname', '-v'])
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_rc == 0 and 'NexentaOS_' in uname_out:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_rc == 0:
self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
return
return False # TODO: remove if tested without this
def get_distribution_SuSE(self, name, data, path):
if 'suse' not in data.lower():
return False # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
self.facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04 13.0 13
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
self.facts['distribution_version'] = distribution_version.group(1)
if 'open' in data.lower():
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
self.facts['distribution_release'] = release
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
self.facts['distribution'] = "SLES"
elif "Desktop" in data:
self.facts['distribution'] = "SLED"
for line in lines:
                release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
if release:
self.facts['distribution_release'] = release.group(1)
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
def get_distribution_Debian(self, name, data, path):
if 'Debian' in data or 'Raspbian' in data:
self.facts['distribution'] = 'Debian'
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
elif 'Ubuntu' in data:
self.facts['distribution'] = 'Ubuntu'
pass # Ubuntu gets correct info from python functions
else:
return False # TODO: remove if tested without this
def get_distribution_Mandriva(self, name, data, path):
if 'Mandriva' in data:
self.facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
else:
return False
def get_distribution_NA(self, name, data, path):
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and self.facts['distribution'] == 'NA':
self.facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and self.facts['distribution_version'] == 'NA':
self.facts['distribution_version'] = version.group(1).strip('"')
def get_distribution_Coreos(self, name, data, path):
if self.facts['distribution'].lower() == 'coreos':
if not data:
# include fix from #15230, #15228
return
release = re.search("^GROUP=(.*)", data)
if release:
self.facts['distribution_release'] = release.group(1).strip('"')
else:
return False # TODO: remove if tested without this
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in get_all_subclasses(Hardware):
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
    # Originally only had these four as top-level facts
ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
# Now we have all of these in a dict structure
MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
self.get_uptime_facts()
self.get_lvm_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
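        # /proc/meminfo lines look like "MemTotal:       16330696 kB";
        # only the numeric kB value is kept and converted to MB below.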
memstats = {}
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in self.ORIGINAL_MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
if key in self.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
memstats[key.lower()] = long(val) / 1024
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
self.facts['memory_mb'] = {
'real' : {
'total': memstats.get('memtotal'),
'used': memstats.get('real:used'),
'free': memstats.get('memfree'),
},
'nocache' : {
'free': memstats.get('nocache:free'),
'used': memstats.get('nocache:used'),
},
'swap' : {
'total': memstats.get('swaptotal'),
'free': memstats.get('swapfree'),
'used': memstats.get('swap:used'),
'cached': memstats.get('swapcached'),
},
}
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
else:
for line in get_file_lines('/sys/hypervisor/type'):
if line.strip() == 'xen':
xen = True
# Only interested in the first line
break
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
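        # Typical /proc/cpuinfo entries parsed below (illustrative):
        #   model name  : Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
        #   physical id : 0
        #   cpu cores   : 12
        #   siblings    : 24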
for line in get_file_lines('/proc/cpuinfo'):
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
                    if 'vme' not in data[1]:
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
uuids = dict()
self.facts['mounts'] = []
bind_mounts = []
findmntPath = self.module.get_bin_path("findmnt")
if findmntPath:
rc, out, err = self.module.run_command("%s -lnur" % ( findmntPath ), use_unsafe_shell=True)
if rc == 0:
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
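                # findmnt -lnur prints "TARGET SOURCE FSTYPE OPTIONS" per line; a bind
                # mount's SOURCE looks like "/dev/sda1[/some/subdir]" (hence the "]" match).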
for line in out.split('\n'):
fields = line.rstrip('\n').split()
                    if len(fields) < 2:
                        continue
                    if re.match(".*\]", fields[1]):
bind_mounts.append(fields[0])
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
fields = line.rstrip('\n').split()
if fields[0].startswith('/') or ':/' in fields[0]:
                if fields[2] != 'none':
size_total, size_available = self._get_mount_size_facts(fields[1])
if fields[0] in uuids:
uuid = uuids[fields[0]]
else:
uuid = 'NA'
lsblkPath = self.module.get_bin_path("lsblk")
if lsblkPath:
rc, out, err = self.module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True)
if rc == 0:
uuid = out.strip()
uuids[fields[0]] = uuid
if fields[1] in bind_mounts:
# only add if not already there, we might have a plain /etc/mtab
if not re.match(".*bind.*", fields[3]):
fields[3] += ",bind"
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
'uuid': uuid,
})
def get_device_facts(self):
self.facts['devices'] = {}
lspci = self.module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = self.module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
def get_uptime_facts(self):
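        # /proc/uptime holds two floats, e.g. "35061.27 133974.11";
        # the first is seconds since boot.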
uptime_file_content = get_file_content('/proc/uptime')
if uptime_file_content:
uptime_seconds_string = uptime_file_content.split(' ')[0]
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
def get_lvm_facts(self):
""" Get LVM Facts if running as root and lvm utils are available """
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
lvm_util_options = '--noheadings --nosuffix --units g'
vgs_path = self.module.get_bin_path('vgs')
#vgs fields: VG #PV #LV #SN Attr VSize VFree
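            # e.g. (illustrative): "  vg00   1   3   0 wz--n- 19.99 4.00"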
vgs={}
if vgs_path:
rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options))
for vg_line in vg_lines.splitlines():
items = vg_line.split()
vgs[items[0]] = {'size_g':items[-2],
'free_g':items[-1],
'num_lvs': items[2],
'num_pvs': items[1]}
lvs_path = self.module.get_bin_path('lvs')
#lvs fields:
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
lvs = {}
if lvs_path:
rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options))
for lv_line in lv_lines.splitlines():
items = lv_line.split()
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs}
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = self.module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
# For a detailed format description see mnttab(4)
# special mount_point fstype options time
fstab = get_file_content('/etc/mnttab')
if fstab:
for line in fstab.split('\n'):
fields = line.rstrip('\n').split('\t')
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available})
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
self.get_mount_facts()
return self.facts
def get_sysctl(self):
rc, out, err = self.module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
(key, value) = line.split('=')
sysctl[key] = value.strip()
return sysctl
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
if fields[1] == 'none' or fields[3] == 'xx':
continue
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = self.module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = self.module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
if data[0] != 'Device':
self.facts['swaptotal_mb'] = int(data[1]) / 1024
self.facts['swapfree_mb'] = int(data[3]) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = self.module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class DragonFlyHardware(FreeBSDHardware):
pass
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in get_file_lines("/proc/cpuinfo"):
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in get_file_lines("/proc/meminfo"):
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # Get swap usage from lsps -s, whose output looks like:
        # Total Paging Space   Percent Used
        #       512MB               1%
        #
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                #if machinfo returns core strings, release B.11.31 > 1204
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = self.module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = self.module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = self.module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in get_all_subclasses(Network):
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
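        # A successful lookup prints a single route line, e.g. (illustrative):
        #   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.10
        # which is tokenised below into interface/address/gateway.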
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
if _type == '1':
interfaces[device]['type'] = 'ether'
elif _type == '512':
interfaces[device]['type'] = 'ppp'
elif _type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
if os.path.exists(os.path.join(path,'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(get_file_content(os.path.join(path, 'flags')),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
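            # Parses "ip addr show" lines such as (illustrative):
            #   inet 192.168.1.10/24 brd 192.168.1.255 scope global eth0
            #   inet6 fe80::5054:ff:fe12:3456/64 scope link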
for line in output.split('\n'):
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = self.module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
        # Replace ':' with '_' in interface names, since ':' is hard to use in templates.
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def populate(self):
ifconfig_path = self.module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = self.module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
if len(words) >= 5 : # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
def merge_default_interface(self, defaults, interfaces, ip_type):
if not 'interface' in defaults.keys():
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class HPUXNetwork(Network):
"""
    HP-UX-specific subclass of Network. It defines networking facts:
- default_interface
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4 address information.
"""
platform = 'HP-UX'
def populate(self):
netstat_path = self.module.get_bin_path('netstat')
if netstat_path is None:
return self.facts
self.get_default_interfaces()
interfaces = self.get_interfaces_info()
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
return self.facts
def get_default_interfaces(self):
rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1:
if words[0] == 'default':
self.facts['default_interface'] = words[4]
self.facts['default_gateway'] = words[1]
def get_interfaces_info(self):
interfaces = {}
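        # 'netstat -ni' output is scanned for tokens starting with 'lan'
        # (HP-UX interface names); the network and address columns follow
        # two and three tokens later, respectively.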
rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
lines = out.split('\n')
for line in lines:
words = line.split()
for i in range(len(words) - 1):
if words[i][:3] == 'lan':
device = words[i]
interfaces[device] = { 'device': device }
address = words[i+3]
interfaces[device]['ipv4'] = { 'address': address }
network = words[i+2]
interfaces[device]['ipv4'] = { 'network': network,
'interface': device,
'address': address }
return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
"""
This is the DragonFly Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'DragonFly'
class AIXNetwork(GenericBsdIfconfigNetwork):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
def get_default_interfaces(self, route_path):
netstat_path = self.module.get_bin_path('netstat')
rc, out, err = self.module.run_command([netstat_path, '-nr'])
interface = dict(v4 = {}, v6 = {})
lines = out.split('\n')
for line in lines:
words = line.split()
if len(words) > 1 and words[0] == 'default':
if '.' in words[1]:
interface['v4']['gateway'] = words[1]
interface['v4']['interface'] = words[5]
elif ':' in words[1]:
interface['v6']['gateway'] = words[1]
interface['v6']['interface'] = words[5]
return interface['v4'], interface['v6']
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
uname_path = self.module.get_bin_path('uname')
if uname_path:
rc, out, err = self.module.run_command([uname_path, '-W'])
            # Don't bother with WPARs; this does not work inside them.
            # A return value of zero means we are not in a WPAR.
if not rc and out.split()[0] == '0':
if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
entstat_path = self.module.get_bin_path('entstat')
if entstat_path:
rc, out, err = self.module.run_command([entstat_path, current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if not line:
pass
buff = re.match('^Hardware Address: (.*)', line)
if buff:
current_if['macaddress'] = buff.group(1)
buff = re.match('^Device Type:', line)
if buff and re.match('.*Ethernet', line):
current_if['type'] = 'ether'
# device must have mtu attribute in ODM
if 'mtu' not in current_if:
lsattr_path = self.module.get_bin_path('lsattr')
if lsattr_path:
rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ])
if rc != 0:
break
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'mtu':
current_if['mtu'] = words[1]
return interfaces, ips
# AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = self.module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in get_all_subclasses(Virtual):
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
systemd_container = get_file_content('/run/systemd/container')
if systemd_container:
self.facts['virtualization_type'] = systemd_container
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in get_file_lines('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
        # FIXME: This also matches Hyper-V.
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in get_file_lines('/proc/self/status'):
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in get_file_lines('/proc/cpuinfo'):
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = self.module.get_bin_path('lscpu')
if lscpu:
rc, out, err = self.module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in get_file_lines("/proc/modules"):
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class FreeBSDVirtual(Virtual):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class DragonFlyVirtual(FreeBSDVirtual):
pass
class OpenBSDVirtual(Virtual):
"""
    This is an OpenBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'OpenBSD'
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
self.facts['virtualization_type'] = ''
self.facts['virtualization_role'] = ''
class HPUXVirtual(Virtual):
"""
    This is an HP-UX-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = self.module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = self.module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = self.module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = self.module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
        # If it's a zone, check whether we can detect that the global zone
        # itself is virtualized.
        # Relies on the "guest tools" (e.g. VMware Tools) being installed.
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = self.module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
# Detect domaining on Sparc hardware
if os.path.exists("/usr/sbin/virtinfo"):
            # The output of virtinfo differs depending on whether we are on a machine with
            # logical domains ('LDoms') on a T-series or domains ('Domains') on an M-series. Try LDoms first.
rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
# The output contains multiple lines with different keys like this:
# DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be unformatted, and the return code is 0 regardless of the error condition:
# virtinfo can only be run from the global zone
try:
for line in out.split('\n'):
fields = line.split('|')
if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
self.facts['virtualization_type'] = 'ldom'
self.facts['virtualization_role'] = 'guest'
hostfeatures = []
for field in fields[2:]:
arg = field.split('=')
if( arg[1] == 'true' ):
hostfeatures.append(arg[0])
if( len(hostfeatures) > 0 ):
self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
except ValueError:
pass
class Ohai(Facts):
"""
This is a subclass of Facts for including information gathered from Ohai.
"""
def populate(self):
self.run_ohai()
return self.facts
def run_ohai(self):
ohai_path = self.module.get_bin_path('ohai')
if ohai_path is None:
return
rc, out, err = self.module.run_command(ohai_path)
try:
self.facts.update(json.loads(out))
except:
pass
class Facter(Facts):
"""
This is a subclass of Facts for including information gathered from Facter.
"""
def populate(self):
self.run_facter()
return self.facts
def run_facter(self):
facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
# Prefer to use cfacter if available
if cfacter_path is not None:
facter_path = cfacter_path
if facter_path is None:
return
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
rc, out, err = self.module.run_command(facter_path + " --puppet --json")
try:
self.facts = json.loads(out)
except:
pass
def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
try:
datafile = open(path)
data = datafile.read()
if strip:
data = data.strip()
if len(data) == 0:
data = default
finally:
datafile.close()
except:
# ignore errors as some jails/containers might have readable permissions but not allow reads to proc
# done in 2 blocks for 2.4 compat
pass
return data
def get_file_lines(path):
'''get list of lines from file'''
data = get_file_content(path)
if data:
ret = data.splitlines()
else:
ret = []
return ret
def ansible_facts(module, gather_subset):
facts = {}
facts['gather_subset'] = list(gather_subset)
facts.update(Facts(module).populate())
for subset in gather_subset:
facts.update(FACT_SUBSETS[subset](module,
load_on_init=False,
cached_facts=facts).populate())
return facts
def get_all_facts(module):
setup_options = dict(module_setup=True)
# Retrieve module parameters
gather_subset = module.params['gather_subset']
# Retrieve all facts elements
additional_subsets = set()
exclude_subsets = set()
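    # A subset may be negated with a leading '!', and 'all' expands to every
    # valid subset; exclusions are applied only after all additions have been
    # collected.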
for subset in gather_subset:
if subset == 'all':
additional_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys())))
if exclude:
exclude_subsets.add(subset)
else:
additional_subsets.add(subset)
if not additional_subsets:
additional_subsets.update(VALID_SUBSETS)
additional_subsets.difference_update(exclude_subsets)
# facter and ohai are given a different prefix than other subsets
if 'facter' in additional_subsets:
additional_subsets.difference_update(('facter',))
facter_ds = FACT_SUBSETS['facter'](module, load_on_init=False).populate()
if facter_ds:
for (k, v) in facter_ds.items():
setup_options['facter_%s' % k.replace('-', '_')] = v
if 'ohai' in additional_subsets:
additional_subsets.difference_update(('ohai',))
ohai_ds = FACT_SUBSETS['ohai'](module, load_on_init=False).populate()
if ohai_ds:
for (k, v) in ohai_ds.items():
setup_options['ohai_%s' % k.replace('-', '_')] = v
facts = ansible_facts(module, additional_subsets)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['_ansible_verbose_override'] = True
return setup_result
# Allowed fact subset for gather_subset options and what classes they use
# Note: have to define this at the bottom as it references classes defined earlier in this file
FACT_SUBSETS = dict(
hardware=Hardware,
network=Network,
virtual=Virtual,
ohai=Ohai,
facter=Facter,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
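# Illustrative gather_subset examples: ['!hardware'] collects everything
# except hardware facts, while ['network'] collects only the base facts plus
# network facts.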
| gpl-3.0 | 6,228,155,176,280,810,000 | 41.395235 | 209 | 0.521059 | false |
NaohiroTamura/python-ironicclient | ironicclient/common/http.py | 1 | 25914 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from distutils.version import StrictVersion
import functools
import hashlib
import logging
import os
import socket
import ssl
import textwrap
import time
from keystoneauth1 import adapter
from keystoneauth1 import exceptions as kexc
from oslo_serialization import jsonutils
from oslo_utils import strutils
import requests
import six
from six.moves import http_client
import six.moves.urllib.parse as urlparse
from ironicclient.common import filecache
from ironicclient.common.i18n import _
from ironicclient.common.i18n import _LE
from ironicclient.common.i18n import _LW
from ironicclient import exc
# NOTE(deva): Record the latest version that this client was tested with.
# We still have a lot of work to do in the client to implement
# microversion support in the client properly! See
# http://specs.openstack.org/openstack/ironic-specs/specs/kilo/api-microversions.html # noqa
# for full details.
DEFAULT_VER = '1.9'
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-ironicclient'
CHUNKSIZE = 1024 * 64 # 64kB
API_VERSION = '/v1'
API_VERSION_SELECTED_STATES = ('user', 'negotiated', 'cached', 'default')
DEFAULT_MAX_RETRIES = 5
DEFAULT_RETRY_INTERVAL = 2
SENSITIVE_HEADERS = ('X-Auth-Token',)
SUPPORTED_ENDPOINT_SCHEME = ('http', 'https')
def _trim_endpoint_api_version(url):
    """Trim API version and trailing slash from endpoint."""
    # str.rstrip() strips characters, not a suffix; remove the version segment explicitly.
    url = url.rstrip('/')
    return url[:-len(API_VERSION)] if url.endswith(API_VERSION) else url
def _extract_error_json(body):
"""Return error_message from the HTTP response body."""
error_json = {}
try:
body_json = jsonutils.loads(body)
if 'error_message' in body_json:
raw_msg = body_json['error_message']
error_json = jsonutils.loads(raw_msg)
except ValueError:
pass
return error_json
def get_server(endpoint):
"""Extract and return the server & port that we're connecting to."""
if endpoint is None:
return None, None
parts = urlparse.urlparse(endpoint)
return parts.hostname, str(parts.port)
class VersionNegotiationMixin(object):
def negotiate_version(self, conn, resp):
"""Negotiate the server version
Assumption: Called after receiving a 406 error when doing a request.
        :param conn: A connection object
        :param resp: The response object from the HTTP request
"""
if self.api_version_select_state not in API_VERSION_SELECTED_STATES:
raise RuntimeError(
_('Error: self.api_version_select_state should be one of the '
'values in: "%(valid)s" but had the value: "%(value)s"') %
{'valid': ', '.join(API_VERSION_SELECTED_STATES),
'value': self.api_version_select_state})
min_ver, max_ver = self._parse_version_headers(resp)
# NOTE: servers before commit 32fb6e99 did not return version headers
# on error, so we need to perform a GET to determine
# the supported version range
if not max_ver:
LOG.debug('No version header in response, requesting from server')
if self.os_ironic_api_version:
base_version = ("/v%s" %
str(self.os_ironic_api_version).split('.')[0])
else:
base_version = API_VERSION
resp = self._make_simple_request(conn, 'GET', base_version)
min_ver, max_ver = self._parse_version_headers(resp)
# If the user requested an explicit version or we have negotiated a
# version and still failing then error now. The server could
# support the version requested but the requested operation may not
# be supported by the requested version.
if self.api_version_select_state == 'user':
raise exc.UnsupportedVersion(textwrap.fill(
_("Requested API version %(req)s is not supported by the "
"server or the requested operation is not supported by the "
"requested version. Supported version range is %(min)s to "
"%(max)s")
% {'req': self.os_ironic_api_version,
'min': min_ver, 'max': max_ver}))
if self.api_version_select_state == 'negotiated':
raise exc.UnsupportedVersion(textwrap.fill(
_("No API version was specified and the requested operation "
"was not supported by the client's negotiated API version "
"%(req)s. Supported version range is: %(min)s to %(max)s")
% {'req': self.os_ironic_api_version,
'min': min_ver, 'max': max_ver}))
negotiated_ver = str(min(StrictVersion(self.os_ironic_api_version),
StrictVersion(max_ver)))
        # Compare as versions, not strings (lexically, '1.9' > '1.10').
        if StrictVersion(negotiated_ver) < StrictVersion(min_ver):
            negotiated_ver = min_ver
# server handles microversions, but doesn't support
# the requested version, so try a negotiated version
self.api_version_select_state = 'negotiated'
self.os_ironic_api_version = negotiated_ver
LOG.debug('Negotiated API version is %s', negotiated_ver)
# Cache the negotiated version for this server
host, port = get_server(self.endpoint)
filecache.save_data(host=host, port=port, data=negotiated_ver)
return negotiated_ver
def _generic_parse_version_headers(self, accessor_func):
min_ver = accessor_func('X-OpenStack-Ironic-API-Minimum-Version',
None)
max_ver = accessor_func('X-OpenStack-Ironic-API-Maximum-Version',
None)
return min_ver, max_ver
def _parse_version_headers(self, accessor_func):
# NOTE(jlvillal): Declared for unit testing purposes
raise NotImplementedError()
def _make_simple_request(self, conn, method, url):
# NOTE(jlvillal): Declared for unit testing purposes
raise NotImplementedError()
_RETRY_EXCEPTIONS = (exc.Conflict, exc.ServiceUnavailable,
exc.ConnectionRefused, kexc.RetriableConnectionFailure)
def with_retries(func):
"""Wrapper for _http_request adding support for retries."""
@functools.wraps(func)
def wrapper(self, url, method, **kwargs):
if self.conflict_max_retries is None:
self.conflict_max_retries = DEFAULT_MAX_RETRIES
if self.conflict_retry_interval is None:
self.conflict_retry_interval = DEFAULT_RETRY_INTERVAL
num_attempts = self.conflict_max_retries + 1
for attempt in range(1, num_attempts + 1):
try:
return func(self, url, method, **kwargs)
except _RETRY_EXCEPTIONS as error:
msg = (_LE("Error contacting Ironic server: %(error)s. "
"Attempt %(attempt)d of %(total)d") %
{'attempt': attempt,
'total': num_attempts,
'error': error})
if attempt == num_attempts:
LOG.error(msg)
raise
else:
LOG.debug(msg)
time.sleep(self.conflict_retry_interval)
return wrapper
class HTTPClient(VersionNegotiationMixin):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.endpoint_trimmed = _trim_endpoint_api_version(endpoint)
self.auth_token = kwargs.get('token')
self.auth_ref = kwargs.get('auth_ref')
self.os_ironic_api_version = kwargs.get('os_ironic_api_version',
DEFAULT_VER)
self.api_version_select_state = kwargs.get(
'api_version_select_state', 'default')
self.conflict_max_retries = kwargs.pop('max_retries',
DEFAULT_MAX_RETRIES)
self.conflict_retry_interval = kwargs.pop('retry_interval',
DEFAULT_RETRY_INTERVAL)
self.session = requests.Session()
parts = urlparse.urlparse(endpoint)
if parts.scheme not in SUPPORTED_ENDPOINT_SCHEME:
msg = _('Unsupported scheme: %s') % parts.scheme
raise exc.EndpointException(msg)
if parts.scheme == 'https':
if kwargs.get('insecure') is True:
self.session.verify = False
elif kwargs.get('ca_file'):
self.session.verify = kwargs['ca_file']
self.session.cert = (kwargs.get('cert_file'),
kwargs.get('key_file'))
def _process_header(self, name, value):
"""Redacts any sensitive header
Redact a header that contains sensitive information, by returning an
updated header with the sha1 hash of that value. The redacted value is
prefixed by '{SHA1}' because that's the convention used within
OpenStack.
        :returns: A tuple of (name, value)
                  name: the header name, unchanged
                  value: the redacted value if the header is sensitive
                         (e.g. X-Auth-Token), otherwise the original value
"""
if name in SENSITIVE_HEADERS:
v = value.encode('utf-8')
h = hashlib.sha1(v)
d = h.hexdigest()
return (name, "{SHA1}%s" % d)
else:
return (name, value)
def log_curl_request(self, method, url, kwargs):
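        # Builds and logs a curl command equivalent to the request about to
        # be made, e.g. (illustrative, token already redacted):
        #   curl -i -X GET -H 'X-Auth-Token: {SHA1}9a3b...' http://ironic:6385/v1/nodes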
curl = ['curl -i -X %s' % method]
for (key, value) in kwargs['headers'].items():
header = '-H \'%s: %s\'' % self._process_header(key, value)
curl.append(header)
if not self.session.verify:
curl.append('-k')
elif isinstance(self.session.verify, six.string_types):
curl.append('--cacert %s' % self.session.verify)
if self.session.cert:
curl.append('--cert %s' % self.session.cert[0])
curl.append('--key %s' % self.session.cert[1])
if 'body' in kwargs:
body = strutils.mask_password(kwargs['body'])
curl.append('-d \'%s\'' % body)
curl.append(urlparse.urljoin(self.endpoint_trimmed, url))
LOG.debug(' '.join(curl))
@staticmethod
def log_http_response(resp, body=None):
        # NOTE(aarefiev): resp.raw is the urllib3 response object; it is used
        # only to get the HTTP version. A response from a request made with
        # stream=True should be used for raw reading.
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
dump.extend(['%s: %s' % (k, v) for k, v in resp.headers.items()])
dump.append('')
if body:
body = strutils.mask_password(body)
dump.extend([body, ''])
LOG.debug('\n'.join(dump))
def _make_connection_url(self, url):
return urlparse.urljoin(self.endpoint_trimmed, url)
def _parse_version_headers(self, resp):
return self._generic_parse_version_headers(resp.headers.get)
def _make_simple_request(self, conn, method, url):
return conn.request(method, self._make_connection_url(url))
@with_retries
def _http_request(self, url, method, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around request.Session.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
kwargs['headers'].setdefault('User-Agent', USER_AGENT)
if self.os_ironic_api_version:
kwargs['headers'].setdefault('X-OpenStack-Ironic-API-Version',
self.os_ironic_api_version)
if self.auth_token:
kwargs['headers'].setdefault('X-Auth-Token', self.auth_token)
self.log_curl_request(method, url, kwargs)
        # NOTE(aarefiev): This is for backwards compatibility: requests
        # expects the body in the 'data' field, while the httplib-based
        # client used previously expected a 'body' field.
body = kwargs.pop('body', None)
if body:
kwargs['data'] = body
conn_url = self._make_connection_url(url)
try:
resp = self.session.request(method,
conn_url,
**kwargs)
# TODO(deva): implement graceful client downgrade when connecting
# to servers that did not support microversions. Details here:
# http://specs.openstack.org/openstack/ironic-specs/specs/kilo/api-microversions.html#use-case-3b-new-client-communicating-with-a-old-ironic-user-specified # noqa
if resp.status_code == http_client.NOT_ACCEPTABLE:
negotiated_ver = self.negotiate_version(self.session, resp)
kwargs['headers']['X-OpenStack-Ironic-API-Version'] = (
negotiated_ver)
return self._http_request(url, method, **kwargs)
except requests.exceptions.RequestException as e:
message = (_("Error has occurred while handling "
"request for %(url)s: %(e)s") %
dict(url=conn_url, e=e))
            # NOTE(aarefiev): not a valid request (invalid URL, missing
            # schema, and so on); retrying is not needed.
if isinstance(e, ValueError):
raise exc.ValidationError(message)
raise exc.ConnectionRefused(message)
body_str = None
if resp.headers.get('Content-Type') == 'application/octet-stream':
body_iter = resp.iter_content(chunk_size=CHUNKSIZE)
self.log_http_response(resp)
else:
# Read body into string if it isn't obviously image data
body_str = resp.text
self.log_http_response(resp, body_str)
body_iter = six.StringIO(body_str)
if resp.status_code >= http_client.BAD_REQUEST:
error_json = _extract_error_json(body_str)
raise exc.from_response(
resp, error_json.get('faultstring'),
error_json.get('debuginfo'), method, url)
elif resp.status_code in (http_client.MOVED_PERMANENTLY,
http_client.FOUND,
http_client.USE_PROXY):
# Redirected. Reissue the request to the new location.
            return self._http_request(resp.headers['location'], method, **kwargs)
elif resp.status_code == http_client.MULTIPLE_CHOICES:
raise exc.from_response(resp, method=method, url=url)
return resp, body_iter
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'body' in kwargs:
kwargs['body'] = jsonutils.dump_as_bytes(kwargs['body'])
resp, body_iter = self._http_request(url, method, **kwargs)
content_type = resp.headers.get('Content-Type')
if (resp.status_code in (http_client.NO_CONTENT,
http_client.RESET_CONTENT)
or content_type is None):
return resp, list()
if 'application/json' in content_type:
body = ''.join([chunk for chunk in body_iter])
try:
body = jsonutils.loads(body)
except ValueError:
LOG.error(_LE('Could not decode response body as JSON'))
else:
body = None
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
class VerifiedHTTPSConnection(six.moves.http_client.HTTPSConnection):
"""httplib-compatible connection using client-side SSL authentication
:see http://code.activestate.com/recipes/
577548-https-httplib-client-connection-with-certificate-v/
"""
def __init__(self, host, port, key_file=None, cert_file=None,
ca_file=None, timeout=None, insecure=False):
six.moves.http_client.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
if ca_file is not None:
self.ca_file = ca_file
else:
self.ca_file = self.get_system_ca_file()
self.timeout = timeout
self.insecure = insecure
def connect(self):
"""Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
        This is needed to pass cert_reqs=ssl.CERT_REQUIRED as a parameter to
        ssl.wrap_socket(), which forces SSL to validate the server certificate
        against our CA bundle.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
if self.insecure is True:
kwargs = {'cert_reqs': ssl.CERT_NONE}
else:
kwargs = {'cert_reqs': ssl.CERT_REQUIRED, 'ca_certs': self.ca_file}
if self.cert_file:
kwargs['certfile'] = self.cert_file
if self.key_file:
kwargs['keyfile'] = self.key_file
self.sock = ssl.wrap_socket(sock, **kwargs)
@staticmethod
def get_system_ca_file():
"""Return path to system default CA file."""
# Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
# Suse, FreeBSD/OpenBSD
ca_path = ['/etc/ssl/certs/ca-certificates.crt',
'/etc/pki/tls/certs/ca-bundle.crt',
'/etc/ssl/ca-bundle.pem',
'/etc/ssl/cert.pem']
for ca in ca_path:
if os.path.exists(ca):
return ca
return None
class SessionClient(VersionNegotiationMixin, adapter.LegacyJsonAdapter):
"""HTTP client based on Keystone client session."""
def __init__(self,
os_ironic_api_version,
api_version_select_state,
max_retries,
retry_interval,
endpoint,
**kwargs):
self.os_ironic_api_version = os_ironic_api_version
self.api_version_select_state = api_version_select_state
self.conflict_max_retries = max_retries
self.conflict_retry_interval = retry_interval
self.endpoint = endpoint
super(SessionClient, self).__init__(**kwargs)
def _parse_version_headers(self, resp):
return self._generic_parse_version_headers(resp.headers.get)
def _make_simple_request(self, conn, method, url):
# NOTE: conn is self.session for this class
return conn.request(url, method, raise_exc=False)
@with_retries
def _http_request(self, url, method, **kwargs):
kwargs.setdefault('user_agent', USER_AGENT)
kwargs.setdefault('auth', self.auth)
if isinstance(self.endpoint_override, six.string_types):
kwargs.setdefault(
'endpoint_override',
_trim_endpoint_api_version(self.endpoint_override)
)
if getattr(self, 'os_ironic_api_version', None):
kwargs['headers'].setdefault('X-OpenStack-Ironic-API-Version',
self.os_ironic_api_version)
endpoint_filter = kwargs.setdefault('endpoint_filter', {})
endpoint_filter.setdefault('interface', self.interface)
endpoint_filter.setdefault('service_type', self.service_type)
endpoint_filter.setdefault('region_name', self.region_name)
resp = self.session.request(url, method,
raise_exc=False, **kwargs)
if resp.status_code == http_client.NOT_ACCEPTABLE:
negotiated_ver = self.negotiate_version(self.session, resp)
kwargs['headers']['X-OpenStack-Ironic-API-Version'] = (
negotiated_ver)
return self._http_request(url, method, **kwargs)
if resp.status_code >= http_client.BAD_REQUEST:
error_json = _extract_error_json(resp.content)
raise exc.from_response(resp, error_json.get('faultstring'),
error_json.get('debuginfo'), method, url)
elif resp.status_code in (http_client.MOVED_PERMANENTLY,
http_client.FOUND, http_client.USE_PROXY):
# Redirected. Reissue the request to the new location.
location = resp.headers.get('location')
resp = self._http_request(location, method, **kwargs)
elif resp.status_code == http_client.MULTIPLE_CHOICES:
raise exc.from_response(resp, method=method, url=url)
return resp
def json_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type', 'application/json')
kwargs['headers'].setdefault('Accept', 'application/json')
if 'body' in kwargs:
kwargs['data'] = jsonutils.dump_as_bytes(kwargs.pop('body'))
resp = self._http_request(url, method, **kwargs)
body = resp.content
content_type = resp.headers.get('content-type', None)
status = resp.status_code
if (status in (http_client.NO_CONTENT, http_client.RESET_CONTENT) or
content_type is None):
return resp, list()
if 'application/json' in content_type:
try:
body = resp.json()
except ValueError:
LOG.error(_LE('Could not decode response body as JSON'))
else:
body = None
return resp, body
def raw_request(self, method, url, **kwargs):
kwargs.setdefault('headers', {})
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
return self._http_request(url, method, **kwargs)
def _construct_http_client(endpoint=None,
session=None,
token=None,
auth_ref=None,
os_ironic_api_version=DEFAULT_VER,
api_version_select_state='default',
max_retries=DEFAULT_MAX_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
timeout=600,
ca_file=None,
cert_file=None,
key_file=None,
insecure=None,
**kwargs):
if session:
kwargs.setdefault('service_type', 'baremetal')
kwargs.setdefault('user_agent', 'python-ironicclient')
kwargs.setdefault('interface', kwargs.pop('endpoint_type', None))
kwargs.setdefault('endpoint_override', endpoint)
ignored = {'token': token,
'auth_ref': auth_ref,
'timeout': timeout != 600,
'ca_file': ca_file,
'cert_file': cert_file,
'key_file': key_file,
'insecure': insecure}
dvars = [k for k, v in ignored.items() if v]
if dvars:
LOG.warning(_LW('The following arguments are ignored when using '
'the session to construct a client: %s'),
', '.join(dvars))
return SessionClient(session=session,
os_ironic_api_version=os_ironic_api_version,
api_version_select_state=api_version_select_state,
max_retries=max_retries,
retry_interval=retry_interval,
endpoint=endpoint,
**kwargs)
else:
if kwargs:
LOG.warning(_LW('The following arguments are being ignored when '
'constructing the client: %s'), ', '.join(kwargs))
return HTTPClient(endpoint=endpoint,
token=token,
auth_ref=auth_ref,
os_ironic_api_version=os_ironic_api_version,
api_version_select_state=api_version_select_state,
max_retries=max_retries,
retry_interval=retry_interval,
timeout=timeout,
ca_file=ca_file,
cert_file=cert_file,
key_file=key_file,
insecure=insecure)
| apache-2.0 | 5,864,973,488,052,471,000 | 39.873817 | 175 | 0.575095 | false |
4teamwork/ftw.pdfify | setup.py | 1 | 1342 | from setuptools import setup, find_packages
import os
version = '1.0.0.dev0'
tests_require = [
'ftw.builder',
'ftw.testbrowser',
'plone.app.testing',
]
setup(name='ftw.pdfify',
version=version,
description='Create PDF from documents.',
long_description=open('README.md').read() + '\n' + \
open(os.path.join('docs', 'HISTORY.txt')).read(),
# Get more strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Framework :: Plone',
'Framework :: Plone :: 4.1',
'Framework :: Plone :: 4.2',
'Framework :: Plone :: 4.3',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='',
author='4teamwork AG',
      author_email='[email protected]',
url='https://github.com/4teamwork/ftw.pdfify',
license='GPL2',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['ftw'],
include_package_data=True,
zip_safe=False,
install_requires=[
'pdfify_celery',
],
tests_require=tests_require,
extras_require=dict(tests=tests_require),
entry_points='''
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
''',
)
| gpl-2.0 | -8,820,520,165,633,576,000 | 25.84 | 84 | 0.577496 | false |
onepercentclub/django-token-auth | token_auth/tests/test_booking.py | 1 | 13504 | import base64
import hashlib
import hmac
from datetime import datetime, timedelta
from Crypto.Cipher import AES
from Crypto import Random
import mock
from django.test.testcases import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import RequestFactory
from token_auth.exceptions import TokenAuthenticationError
from token_auth.auth.booking import TokenAuthentication
from token_auth.models import CheckedToken
from .factories import CheckedTokenFactory
TOKEN_AUTH_SETTINGS = {
'backend': 'token_auth.auth.booking.TokenAuthentication',
'sso_url': 'https://example.org',
'token_expiration': 600,
'hmac_key': 'bbbbbbbbbbbbbbbb',
'aes_key': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
}
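# Token layout exercised by these tests: the SSO server AES-CBC encrypts
# "time=...|username=...|name=...|email=..." with a random IV, appends an
# HMAC-SHA1 digest of IV + ciphertext, and base64url-encodes the result
# (see _encode_message below).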
class TestBookingTokenAuthentication(TestCase):
"""
Tests the Token Authentication backend.
"""
def setUp(self):
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS, AUTH_USER_MODEL='tests.TestUser'):
self.request = RequestFactory().get('/api/sso/redirect')
            # To keep things simple, the valid token below has been corrupted
            # by putting an X at the beginning of each of its lines.
self.token = 'XbaTf5AVWkpkiACH6nNZZUVzZR0rye7rbiqrm3Qrgph5Sn3EwsFERytBwoj' \
'XaqSdISPvvc7aefusFmHDXAJbwLvCJ3N73x4whT7XPiJz7kfrFKYal6WlD8' \
'Xu5JZgVTmV5hdywGQkPMFT1Z7m4z1ga6Oud2KoQNhrf5cKzQ5CSdTojZmZ0' \
'XT24jBuwm5YUqFbvwTBxg=='
self.corrupt_token = self.token
self.auth_backend = TokenAuthentication(self.request, token=self.token)
self.checked_token = CheckedTokenFactory.create()
self.data = 'time=2013-12-23 17:51:15|username=johndoe|name=John Doe' \
'|[email protected]'
# Get the new security keys to use it around in the tests.
self.hmac_key = self.auth_backend.settings['hmac_key']
self.aes_key = self.auth_backend.settings['aes_key']
def _encode_message(self, message):
"""
Helper method for unit tests which returns an encoded version of the
message passed as an argument.
        It returns a tuple of two elements:
        1. A string formed by the initialization vector and the AES-CBC
           encrypted message.
2. The HMAC-SHA1 hash of that string.
"""
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(
AES.block_size - len(s) % AES.block_size)
init_vector = Random.new().read(AES.block_size)
cipher = AES.new(self.aes_key, AES.MODE_CBC, init_vector)
padded_message = pad(message)
aes_message = init_vector + cipher.encrypt(padded_message)
hmac_digest = hmac.new(self.hmac_key, aes_message, hashlib.sha1)
return aes_message, hmac_digest
def test_sso_url(self):
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
self.assertEqual(self.auth_backend.sso_url(), TOKEN_AUTH_SETTINGS['sso_url'])
def test_sso_url_custom_target(self):
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
self.assertEqual(
self.auth_backend.sso_url(target_url='/test/'),
TOKEN_AUTH_SETTINGS['sso_url'] + '?url=%2Ftest%2F'
)
def test_sso_url_custom_target_unicode(self):
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
self.assertEqual(
self.auth_backend.sso_url(target_url=u'/test/\u2026/bla'),
TOKEN_AUTH_SETTINGS['sso_url'] + '?url=%2Ftest%2F%E2%80%A6%2Fbla'
)
def test_check_hmac_signature_ok(self):
"""
Tests that the method to check up HMAC signature of the token message
returns True when it is a valid signature.
"""
message = base64.urlsafe_b64decode(self.checked_token.token)
self.assertTrue(self.auth_backend.check_hmac_signature(message))
def test_check_hmac_signature_wrong(self):
"""
Tests the method to check up HMAC signature when the token is corrupted
and the signatures is not valid.
"""
message = base64.b64decode(self.corrupt_token)
self.assertFalse(self.auth_backend.check_hmac_signature(message))
def test_decrypts_message(self):
"""
Tests the method to decrypt the AES encoded message.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
aes_message, hmac_digest = self._encode_message(self.data)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
auth_backend = TokenAuthentication(self.request, token=token)
message = auth_backend.decrypt_message()
self.assertEqual(
message, {'timestamp': '2013-12-23 17:51:15',
'first_name': 'John',
'last_name': 'Doe',
'email': '[email protected]',
'username': '[email protected]',
'remote_id': '[email protected]'
})
def test_get_login_data(self):
"""
Tests the method to split the login message data into a 4-field tuple.
"""
login_data = self.auth_backend.get_login_data(self.data)
self.assertTupleEqual(
login_data,
(
'2013-12-23 17:51:15',
'johndoe',
'John Doe',
'[email protected]'
))
def test_check_timestamp_valid_token(self):
"""
Tests the method to check the login message timestamp when a good
token is received.
"""
login_time = (datetime.now() - timedelta(seconds=10)).strftime('%Y-%m-%d %H:%M:%S')
self.auth_backend.check_timestamp({'timestamp': login_time})
def test_check_timestamp_timedout_token(self):
"""
Tests the method to check the login message timestamp when a wrong
timestamp is given.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
login_time = (datetime.now() - timedelta(
days=self.auth_backend.settings['token_expiration'] + 1
)).strftime('%Y-%m-%d %H:%M:%S')
self.assertRaises(
TokenAuthenticationError,
self.auth_backend.check_timestamp,
{'timestamp': login_time})
def test_authenticate_fail_no_token(self):
"""
Tests that ``authenticate`` method raises an exception when no token
is provided.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
auth_backend = TokenAuthentication(self.request)
self.assertRaisesMessage(
TokenAuthenticationError,
'No token provided',
auth_backend.authenticate)
def test_authenticate_fail_token_used(self):
"""
Tests that ``authenticate`` method raises an exception when a used
token is provided.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
auth_backend = TokenAuthentication(self.request, token=self.checked_token.token)
self.assertRaisesMessage(
TokenAuthenticationError,
'Token was already used and is not valid',
auth_backend.authenticate)
def test_authenticate_fail_corrupted_token(self):
"""
Tests that ``authenticate`` method raises an exception when a corrupt
token is received (HMAC-SHA1 checking).
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
auth_backend = TokenAuthentication(self.request, token=self.corrupt_token)
self.assertRaisesMessage(
TokenAuthenticationError,
'HMAC authentication failed',
auth_backend.authenticate)
def test_authenticate_fail_invalid_login_data(self):
"""
Tests that ``authenticate`` method raises an exception when a valid
        token was received but it did not contain valid authentication data,
so the message contained in the token was not as expected.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
message = 'xxxx=2013-12-18 11:51:15|xxxxxxxx=johndoe|xxxx=John Doe|' \
'[email protected]'
aes_message, hmac_digest = self._encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
auth_backend = TokenAuthentication(self.request, token=token)
self.assertRaisesMessage(
TokenAuthenticationError,
'Message does not contain valid login data',
auth_backend.authenticate)
def test_authenticate_fail_token_expired(self):
"""
Tests that ``authenticate`` method raises an exception when the token
expired.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS):
# Set up a token with an old date (year 2012).
message = 'time=2012-12-18 11:51:15|username=johndoe|name=John Doe|' \
'[email protected]'
aes_message, hmac_digest = self._encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
auth_backend = TokenAuthentication(self.request, token=token)
self.assertRaisesMessage(
TokenAuthenticationError,
'Authentication token expired',
auth_backend.authenticate)
def test_authenticate_successful_login(self):
"""
Tests ``authenticate`` method when it performs a successful login.
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS, AUTH_USER_MODEL='tests.TestUser'):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message = 'time={0}|username=johndoe|name=John Doe|' \
'[email protected]'.format(timestamp)
aes_message, hmac_digest = self._encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
auth_backend = TokenAuthentication(self.request, token=token)
user, created = auth_backend.authenticate()
# Check created user data.
self.assertEqual(user.first_name, 'John')
self.assertEqual(user.is_active, True)
# Check `CheckedToken` related object.
checked_token = CheckedToken.objects.latest('pk')
self.assertEqual(checked_token.token, token)
self.assertEqual(checked_token.user.username, user.username)
@mock.patch.object(get_user_model(), 'get_login_token', create=True, return_value='tralala')
def test_login_view(self, get_jwt_token):
"""
Test the login view for booking
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS, ROOT_URLCONF='token_auth.urls',
AUTH_USER_MODEL='tests.TestUser'):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message = 'time={0}|username=johndoe|name=John Doe|' \
'[email protected]'.format(timestamp)
aes_message, hmac_digest = self._encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
login_url = reverse('token-login', kwargs={'token': token})
response = self.client.get(login_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response['Location'],
"/login-with/2/tralala"
)
@mock.patch.object(get_user_model(), 'get_login_token', create=True, return_value='tralala')
def test_link_view(self, get_jwt_token):
"""
Test the link view for booking
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS, ROOT_URLCONF='token_auth.urls',
AUTH_USER_MODEL='tests.TestUser'):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
message = 'time={0}|username=johndoe|name=John Doe|' \
'[email protected]'.format(timestamp)
aes_message, hmac_digest = self._encode_message(message)
token = base64.urlsafe_b64encode(aes_message + hmac_digest.digest())
login_url = reverse('token-login-link', kwargs={'token': token, 'link': '/projects/my-project'})
response = self.client.get(login_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response['Location'],
"/login-with/2/tralala?next=%2Fprojects%2Fmy-project"
)
def test_redirect_view(self):
"""
Test the redirect view for booking
"""
with self.settings(TOKEN_AUTH=TOKEN_AUTH_SETTINGS, ROOT_URLCONF='token_auth.urls'):
redirect_url = reverse('token-redirect')
response = self.client.get(redirect_url, {'url': '/projects/my-project'})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response['Location'],
"https://example.org?url=%2Fprojects%2Fmy-project"
)
| gpl-2.0 | -5,120,148,098,152,944,000 | 41.332288 | 108 | 0.607153 | false |
dpineo/gadann | gadann/layer.py | 1 | 22760 | #
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 Daniel Pineo ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy
import operator
import scipy.signal
import logging
import cv2
import functools
import pickle
import gzip
from . import cudnn
from . import kernels
from . import tensor
logger = logging.getLogger(__name__)
# ----------------------- Layer --------------------------
class Layer(object):
def __init__(self, **kwargs):
self.params = []
self.name = kwargs['name']
self.input_shape = kwargs['input_shape'] + (3-len(kwargs['input_shape']))*(1,)
self.input_size = functools.reduce(operator.__mul__, self.input_shape, 1)
self.updater = kwargs['updater']
pass
def fprop(self, input):
return input
def bprop(self, input, fprop_result=None):
return input
def gradient(self, input, output):
return {}
def update(self, grads):
self.updater.update(self.params, grads)
def show(self):
pass
def save(self):
# Save weights to file
fo = gzip.GzipFile(self.name + '_weights.gz', 'wb')
pickle.dump([param.get() for param in self.params], fo)
fo.close()
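# Illustrative sketch of how a Layer subclass is typically driven (an assumption
# based on the interface above, not code taken from this module): the owning
# network would do roughly
#
#   h = layer.fprop(v)                 # forward pass
#   delta = layer.bprop(err, h)        # backward pass through the layer
#   grads = layer.gradient(v, delta)   # parameter gradients, e.g. {'w': ..., 'h_bias': ...}
#   layer.update(grads)                # delegates to the configured updater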
# ----------------------- LinearLayer --------------------------
class LinearLayer(Layer):
def __init__(self, **kwargs):
super(LinearLayer, self).__init__(**kwargs)
if 'stride' in kwargs:
self.stride = kwargs['stride']
else:
self.stride = (1, 1)
if 'padding' in kwargs:
self.padding = kwargs['padding']
else:
self.padding = (0, 0)
self.n_features = kwargs['n_features']
try:
self.shape = (self.n_features, self.input_shape[0]) + kwargs['shape'] + (2-len(kwargs['shape']))*(1,)
except:
self.shape = (self.n_features,)+kwargs['input_shape']
self.output_shape = (self.shape[0],)+tuple([(x-y)//s+1+2*p for x, y, s, p in zip(self.input_shape[-2:], self.shape[-2:], self.stride, self.padding)])
init = kwargs.get('init', 0.01)
w = tensor.Tensor(init*numpy.random.randn(*self.shape))
v_bias = tensor.zeros((1, self.input_size))
h_bias = tensor.zeros((1, self.n_features))
self.params = {'w': w, 'v_bias': v_bias, 'h_bias': h_bias}
return
self.filter_descriptor = cudnn.cudnnCreateFilterDescriptor()
self.bias_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.convolution_descriptor = cudnn.cudnnCreateConvolutionDescriptor()
self.input_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.output_descriptor = cudnn.cudnnCreateTensorDescriptor()
cudnn.cudnnSetFilter4dDescriptor(
self.filter_descriptor,
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*w.shape
)
        logger.info('filter_descriptor: %s', cudnn.cudnnGetFilter4dDescriptor(self.filter_descriptor))
cudnn.cudnnSetTensor4dDescriptor(
self.bias_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*(h_bias.shape + (1,1))
)
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.input_shape) + ")"
def show(self):
cv2.imshow(self.name, .1*self.params['w'].mosaic().get()+.5)
cv2.waitKey(1)
def fprop(self, input):
#if self.shape[1:] == input.shape[1:]:
if True:
return self.fprop_dense(input)
else:
return self.fprop_conv(input)
def bprop(self, input, fprop_result=None):
#if self.shape[0] == input.shape[1]:
if True:
return self.bprop_dense(input, fprop_result)
else:
return self.bprop_conv(input, fprop_result)
def gradient(self, input, output):
#if self.shape[1:] == input.size:
if True:
return self.gradient_dense(input, output)
else:
return self.gradient_conv(input, output)
def fprop_dense(self, input):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
result = input.dot(w.T()) + input.ones_vector.dot(h_bias)
assert not numpy.isnan(result.get()).any()
return result
def bprop_dense(self, input, fprop_result=None):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
result = input.dot(w) + input.ones_vector.dot(v_bias)
assert not numpy.isnan(result.get()).any()
return result
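    # Shape sketch for the dense path (an inference from the code above, not an
    # authoritative spec): with the input flattened to (batch, input_size) and w
    # behaving as an effective (n_features, input_size) matrix once its trailing
    # dimensions are flattened, fprop_dense yields (batch, n_features) via
    # input.dot(w.T()), while bprop_dense maps (batch, n_features) back to
    # (batch, input_size) via input.dot(w).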
def fprop_conv(self, input):
assert len(input.shape) == 4
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
assert not numpy.isnan(w.get()).any()
assert not numpy.isnan(v_bias.get()).any()
assert not numpy.isnan(h_bias.get()).any()
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
        logger.info('input_descriptor: %s', cudnn.cudnnGetTensor4dDescriptor(self.input_descriptor))
cudnn.cudnnSetConvolution2dDescriptor(
self.convolution_descriptor,
self.padding[0], self.padding[1], self.stride[0], self.stride[1], 1, 1,
cudnn.cudnnConvolutionMode['CUDNN_CONVOLUTION'])
        logger.info('convolution_descriptor: %s', cudnn.cudnnGetConvolution2dDescriptor(self.convolution_descriptor))
# Get output dimensions (first two values are n_input and filters_out)
batch, channels, height_output, width_output = cudnn.cudnnGetConvolution2dForwardOutputDim(
self.convolution_descriptor,
self.input_descriptor,
self.filter_descriptor
)
# Output tensor
output = tensor.Tensor((batch, self.n_features, height_output, width_output))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
        logger.info('output_descriptor: %s', cudnn.cudnnGetTensor4dDescriptor(self.output_descriptor))
workspace_size = cudnn.cudnnGetConvolutionForwardWorkspaceSize(
cudnn_context,
self.input_descriptor,
self.filter_descriptor,
self.convolution_descriptor,
self.output_descriptor,
cudnn.cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_PREFER_FASTEST'],
).value
workspace = tensor.Tensor((workspace_size,))
        logger.info('workspace_size: %s', workspace_size)
algo = cudnn.cudnnGetConvolutionForwardAlgorithm(
cudnn_context,
self.input_descriptor,
self.filter_descriptor,
self.convolution_descriptor,
self.output_descriptor,
cudnn.cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_PREFER_FASTEST'],
0
)
assert(not numpy.isnan(input.get()).any())
assert(not numpy.isnan(w.get()).any())
# Perform convolution
cudnn.cudnnConvolutionForward(
cudnn_context,
1,
self.input_descriptor,
input.data(),
self.filter_descriptor,
w.data(),
self.convolution_descriptor,
algo,
workspace.data(),
workspace_size,
0,
self.output_descriptor,
output.data()
)
assert( not numpy.isnan(output.get()).any())
cudnn.cudnnAddTensor(
cudnn_context,
cudnn.cudnnAddMode['CUDNN_ADD_SAME_C'],
1,
self.bias_descriptor,
h_bias.data(),
1,
self.output_descriptor,
output.data()
)
assert not numpy.isnan(output.get()).any()
return output
def bprop_conv(self, input, fprop_result=None):
assert len(input.shape) == 4
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
cudnn.cudnnSetConvolution2dDescriptor(
self.convolution_descriptor,
0, 0, 1, 1, 1, 1,
cudnn.cudnnConvolutionMode['CUDNN_CONVOLUTION'])
# Output tensor
output = tensor.Tensor((input.shape[0], w.shape[1], input.shape[2]+w.shape[2]-1, input.shape[3]+w.shape[3]-1))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
# Perform convolution
cudnn.cudnnConvolutionBackwardData(
cudnn_context,
1,
self.filter_descriptor,
w.data(),
self.input_descriptor,
input.data(),
self.convolution_descriptor,
0,
self.output_descriptor,
output.data()
)
assert not numpy.isnan(output.get()).any()
return output
def gradient_dense(self, v, h):
return {
'w': h.T().dot(v),
'h_bias': h.T().dot(h.ones_vector)
}
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*v.shape
)
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*(h.shape + (1,1))
)
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
# Perform convolution
cudnn.cudnnConvolutionBackwardFilter(
cudnn_context,
1,
self.input_descriptor,
v.data(),
self.output_descriptor,
h.data(),
self.convolution_descriptor,
1,
self.filter_descriptor,
w_grad.data()
)
cudnn.cudnnConvolutionBackwardBias(
cudnn_context,
1,
self.output_descriptor,
h.data(),
1,
self.bias_descriptor,
h_bias_grad.data()
)
assert not numpy.isnan(w.get()).any()
assert not numpy.isnan(h_bias.get()).any()
return [w_grad, v_bias_grad, h_bias_grad]
    '''
def gradient_dense(self, input, output):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
tensor.sgemm(output.T(), input, w_grad, alpha=1, beta=0)
tensor.sgemv(h_bias_grad, output.T(), input.ones_vector.T(), alpha=1, beta=0)
assert not numpy.isnan(w_grad.get()).any()
assert not numpy.isnan(v_bias_grad.get()).any()
assert not numpy.isnan(h_bias_grad.get()).any()
return {'w': w_grad, 'v_bias': v_bias_grad, 'h_bias': h_bias_grad}
'''
def gradient_conv(self, input, output):
w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
w_grad = tensor.zeros(w.shape)
v_bias_grad = tensor.zeros(v_bias.shape)
h_bias_grad = tensor.zeros(h_bias.shape)
# Perform convolution
cudnn.cudnnConvolutionBackwardFilter(
cudnn_context,
1,
self.input_descriptor,
input.data(),
self.output_descriptor,
output.data(),
self.convolution_descriptor,
1,
self.filter_descriptor,
w_grad.data()
)
cudnn.cudnnConvolutionBackwardBias(
cudnn_context,
1,
self.output_descriptor,
output.data(),
1,
self.bias_descriptor,
h_bias_grad.data()
)
assert not numpy.isnan(w_grad.get()).any()
assert not numpy.isnan(h_bias_grad.get()).any()
return [w_grad, v_bias_grad, h_bias_grad]
# ----------------------- DenseLayer --------------------------
class DenseLayer(LinearLayer):
def __init__(self, **kwargs):
super(DenseLayer, self).__init__(**kwargs)
def __str__(self):
        return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.shape) + ")"
def show(self):
w = self.params['w']
if w.shape[1] not in (1, 3):
return
cv2.imshow(self.name, self.params['w'].mosaic().get()/10+.5)
cv2.moveWindow(self.name, 0, 0)
cv2.waitKey(1)
class ActivationLayer(Layer):
def __init__(self, activation, **kwargs):
super(ActivationLayer, self).__init__(**kwargs)
self.activation = activation
self.d_activation = getattr(kernels, 'd'+activation.__name__)
self.output_shape = self.input_shape
def fprop(self, input):
result = self.activation(input)
assert not numpy.isnan(result.get()).any()
return result
def bprop(self, input, fprop_result=None):
if fprop_result:
result = self.d_activation(input, fprop_result)
else:
f = self.activation(input)
result = (tensor.Tensor(numpy.ones_like(f.get()))-f) * f
assert not numpy.isnan(result.get()).any()
return result
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(activation='" + self.activation.__name__ + "')"
class DropoutLayer(Layer):
def __init__(self, **kwargs):
super(DropoutLayer, self).__init__(**kwargs)
self.p_exclude = float(kwargs.pop('prob'))
self.p_include = float(1-self.p_exclude)
self.output_shape = self.input_shape
def __str__(self):
return self.name + " - " + self.__class__.__name__ + "(p=" + str(self.p_exclude) + ")"
class MaxPoolingLayer(Layer):
def __init__(self, **kwargs):
super(MaxPoolingLayer, self).__init__(**kwargs)
if 'padding' not in kwargs:
kwargs['padding'] = (0, 0)
if 'stride' not in kwargs:
kwargs['stride'] = (1, 1)
self.shape = kwargs['shape']
self.padding = kwargs['padding']
self.stride = kwargs['stride']
self.output_shape = (self.input_shape[0], (self.input_shape[1]+2*self.padding[0])/self.stride[0], (self.input_shape[2]+2*self.padding[1])/self.stride[1])
self.pooling_descriptor = cudnn.cudnnCreatePoolingDescriptor()
self.input_descriptor = cudnn.cudnnCreateTensorDescriptor()
self.output_descriptor = cudnn.cudnnCreateTensorDescriptor()
cudnn.cudnnSetPooling2dDescriptor(
self.pooling_descriptor,
cudnn.cudnnPoolingMode['CUDNN_POOLING_MAX'],
self.shape[0],
self.shape[1],
self.padding[0],
self.padding[1],
self.stride[0],
self.stride[1]
)
pass
def fprop(self, input):
cudnn.cudnnSetTensor4dDescriptor(
self.input_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*input.shape
)
# Output tensor
output = tensor.Tensor((input.shape[0], input.shape[1], input.shape[2]/self.stride[0], input.shape[3]/self.stride[1]))
cudnn.cudnnSetTensor4dDescriptor(
self.output_descriptor,
cudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW'],
cudnn.cudnnDataType['CUDNN_DATA_FLOAT'],
*output.shape
)
cudnn.cudnnPoolingForward(
cudnn_context,
self.pooling_descriptor,
1,
self.input_descriptor,
input.data(),
0,
self.output_descriptor,
output.data()
)
return output
def __str__(self):
        return self.name + " - " + self.__class__.__name__ + "(shape=" + str(self.shape) + ", stride=" + str(self.stride) + ")"
class ReshapeLayer(Layer):
def __init__(self, **kwargs):
super(ReshapeLayer, self).__init__(**kwargs)
self.input_shape = kwargs['input_shape']
self.output_shape = kwargs['output_shape']
assert(reduce(operator.__mul__, self.input_shape, 1) == reduce(operator.__mul__, self.output_shape, 1))
def fprop(self, input):
assert input.shape[1:] == self.input_shape
input.shape = (input.shape[0],) + self.output_shape
return input
def bprop(self, input, fprop_result=None):
assert input.shape[1:] == self.output_shape
input.shape = (input.shape[0],) + self.input_shape
return input
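# Illustrative use of ReshapeLayer (an assumed example, not from this module):
# flattening a (1, 28, 28) image stack ahead of a dense layer could look like
#
#   reshape = ReshapeLayer(name='flatten', updater=None,
#                          input_shape=(1, 28, 28), output_shape=(784,))
#
# the assert in __init__ only requires both shapes to hold the same number of
# elements, so any compatible reshuffling of dimensions is allowed.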
# ----------------------- ConvLayer --------------------------
class GadannConvLayer(LinearLayer):
def __init__(self, **kwargs):
        super(GadannConvLayer, self).__init__(**kwargs)
# self.conv_step = kwargs['conv_step']
# self.w.axes = ['features_out', 'features_in', 'height', 'width']
def fprop(self, input):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
        result = tensor.Tensor((input.shape[0],w.shape[0])+tuple([x-y for x,y in zip(input.shape[-2:],w.shape[-2:])]))
        grid = (result.shape[-2]/16,result.shape[-1]/16,result.shape[0])
        kernels.conv2d_16x16_kernel(input.gpuarray, w.gpuarray, h_bias.gpuarray, result.gpuarray, numpy.uint32(w.shape[1]), numpy.uint32(w.shape[0]), block=(16,16,1), grid=grid)
assert not numpy.isnan(result.get()).any()
return result
def bprop(self, input, fprop_result=None):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
        result = tensor.zeros((input.shape[0],w.shape[1])+tuple([x+y for x,y in zip(input.shape[-2:],w.shape[-2:])]))
        grid = (input.shape[3]/16,input.shape[2]/16,1)
        kernels.bconv2d_16x16_kernel(input.gpuarray, w.gpuarray, v_bias.gpuarray, result.gpuarray, numpy.uint32(w.shape[0]), numpy.uint32(w.shape[1]), block=(16,16,1), grid=grid)
assert not numpy.isnan(result.get()).any()
return result
def gradient(self, v, h):
        w, v_bias, h_bias = self.params['w'], self.params['v_bias'], self.params['h_bias']
w_update = tensor.zeros(w.shape)
v_bias_update = tensor.zeros(v_bias.shape)
h_bias_update = tensor.zeros(h_bias.shape)
grid = (h.shape[-2]/16,h.shape[-1]/16) # revisit: grid rounds down to nearest 16
kernels.iconv2d_16x16_kernel(v.gpuarray, h.gpuarray, w_update.gpuarray, numpy.uint32(w_update.shape[1]), numpy.uint32(w_update.shape[0]), block=(16,16,1), grid=grid)
kernels.iconv2d_h_bias_16x16_naive_kernel(h.gpuarray, h_bias_update.gpuarray, numpy.uint32(reduce(operator.__mul__, h.shape[-2:], 1)), block=(w_update.shape[0],1,1), grid=grid)
v_bias_block = w.shape[-2:] + (1,)
v_bias_grid = (1,1,1)
kernels.iconv2d_v_bias_16x16_naive_kernel(v.gpuarray, v_bias_update.gpuarray, numpy.uint32(v.shape[1]-16), numpy.uint32(v.shape[2]-16), block=v_bias_block, grid=v_bias_grid)
assert not numpy.isnan(w_update.get()).any()
assert not numpy.isnan(v_bias_update.get()).any()
assert not numpy.isnan(h_bias_update.get()).any()
return [w_update, v_bias_update, h_bias_update]
# ----------------------- NumpyConvLayer --------------------------
class NumpyConvLayer(LinearLayer):
    def fprop(self, input):
        w = self.params['w']
        result = []
        for n in range(w.shape[0]):
            result.append(scipy.signal.convolve(input.get()[0,...], w.get()[n,...], mode='valid'))
        return tensor.Tensor(numpy.ascontiguousarray(numpy.vstack(result)[numpy.newaxis,...]))
    def bprop(self, input, fprop_result=None):
        w = self.params['w']
        result = []
        for n in range(w.shape[0]):
            kernel = numpy.fliplr(numpy.flipud(w.get()[n,...]))
            result.append(scipy.signal.convolve(input.get()[n,...], kernel, mode='full'))
        result = numpy.vstack(result).mean(axis=0)
        return tensor.Tensor(result)
# TODO: disabled for Fermi
#cudnn_context = cudnn.cudnnCreate()
| mit | 3,916,060,930,377,858,000 | 34.532905 | 184 | 0.564587 | false |
denfromufa/mipt-course | pysandbox/torrent_dht_demo_test.py | 1 | 9817 | # Copyright (c) 2012 Timur Iskhodzhanov and MIPT students. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import logging
import os
import random
import re
import sys
import Queue # Renamed to queue in 3.0
import unittest
class FakeDHT(object):
# This is a fake DHT, see http://en.wikipedia.org/wiki/Distributed_hash_table
def __init__(self):
self.__table = {}
# O(log N)
#
# TODO(timurrrr): in fact, we don't need a precise set for a given key,
# a non-empty subset of the most recent machines would work.
# This can also automatically fix the abscence of Remove()
def Append(self, key, new_values):
# will raise exception if 'values' is not a sequence
logging.debug("FakeDHT.Append('%s', '%s')", key, new_values)
if key not in self.__table.keys():
self.__table[key] = new_values
else:
self.__table[key].extend(new_values)
# O(log N)
def Get(self, key):
ret = self.__table[key]
logging.debug("FakeDHT.Get('%s') returns '%s'", key, ret)
return ret
class FakeP2PNetwork(object):
def __init__(self):
# will contain (key -> (receive queue) pairs
# where receive queue holds list of (sender_id, message) pairs.
self.__nodes = {}
self.__availableIDs = set(["Alice", "Brian", "Colin", "David", "Ellie"])
# Returns new node ID
def CreateNode(self):
new_id = random.choice(list(self.__availableIDs))
self.__availableIDs.remove(new_id)
self.__nodes[new_id] = Queue.Queue()
logging.info("New node: %s" % new_id)
return new_id
# Returns True on success, False on failure
# Design flaw: we can use other node's ID as sender_id
# TODO(timurrrr): FakeSocket
def Send(self, sender_id, to_id, message):
assert sender_id in self.__nodes.keys()
if to_id not in self.__nodes.keys():
logging.error("'%s' node is unknown" % to_id)
return False
self.__nodes[to_id].put((sender_id, copy.deepcopy(message)))
return True
# Returns (from, message) pair if present (FIFO), None if no messages are
# available.
# Design flaw: we can use other node's ID as receiver_id
def Receive(self, receiver_id):
if self.__nodes[receiver_id].empty():
return None
return self.__nodes[receiver_id].get()
class TorrentID(object):
CHUNK_SIZE = 4
def __init__(self, contents):
# contents should be a string
self.length = len(contents)
self.filehash = hashlib.sha1(contents).hexdigest()
self.partial_hashes = []
for chunk_id in range(self.length / TorrentID.CHUNK_SIZE + 1):
ph = self.__CalculatePartialHash(contents, chunk_id)
if ph != None:
self.partial_hashes.append(ph)
logging.info("Created torrent:\nContents='%s'\n%s" % (contents, str(self)))
# implicitly called by str(torrent_id)
def __str__(self):
return "Torrent:\n hash=%s\n size=%d\n partial_hashes=[\n %s\n ]\n" % (
self.filehash, self.length, ",\n ".join(self.partial_hashes))
@staticmethod
def GetChunkData(contents, chunk_id):
return contents[chunk_id * TorrentID.CHUNK_SIZE :
(chunk_id+1) * TorrentID.CHUNK_SIZE]
@staticmethod
def SetChunkData(contents, chunk_id, new_data):
idx_l = chunk_id * TorrentID.CHUNK_SIZE
idx_r = idx_l + TorrentID.CHUNK_SIZE
return contents[:idx_l] + new_data + contents[idx_r:]
@staticmethod
def GetChunkHash(chunk_data):
return hashlib.sha1(chunk_data).hexdigest()
@staticmethod
def __CalculatePartialHash(contents, chunk_id):
chunk = TorrentID.GetChunkData(contents, chunk_id)
if len(chunk) > 0:
return TorrentID.GetChunkHash(chunk)
return None
def IsKnownChunk(self, contents, chunk_id):
return self.__CalculatePartialHash(contents, chunk_id) == (
self.partial_hashes[chunk_id])
class TorrentClient(object):
def __init__(self, network, dht):
self.__network = network
self.__id = network.CreateNode()
# Will store (<chunk>_key -> set(machines which know <chunk>)
self.__dht = dht
# Torrent hash -> (torrent, contents) dictionary.
# 'contents' should contain '*' for unknown bytes (TODO: this is a hack)
self.__data = {}
# Torrents to be downloaded
# (torrent hash -> set of missing chunk indices)
self.__partial_torrents = {}
# List of finished-but-not-taken torrent hashes
self.__downloaded = []
def GetDownloadedTorrent(self):
# Peek and return any downloaded torrent as a (torrent, contents) tuple
# (if present), otherwise None.
if len(self.__downloaded) == 0:
return None
ret_hash = self.__downloaded[-1]
self.__downloaded = self.__downloaded[:-1]
return self.__data[ret_hash]
@staticmethod
def __ChunkKey(torrent, chunk_id):
assert chunk_id in range(len(torrent.partial_hashes))
return "chunk_%s_%d" % (torrent.filehash, chunk_id)
def AddTorrent(self, torrent, known_contents=None):
assert torrent.filehash not in self.__data.keys()
if known_contents:
for chunk in range(len(torrent.partial_hashes)):
self.__dht.Append(self.__ChunkKey(torrent, chunk), [self.__id])
print "%s: Loaded torrent '%s'" % (self.__id, known_contents)
else:
known_contents = "*" * torrent.length
self.__partial_torrents[torrent.filehash] = (
set(range(len(torrent.partial_hashes))))
self.__data[torrent.filehash] = (torrent, known_contents)
def Tick(self):
message = self.__network.Receive(self.__id)
if message:
self.__ProcessMessage(message)
if len(self.__partial_torrents.keys()) > 0:
# Select random a torrent to download a chunk
filehash = random.choice(self.__partial_torrents.keys())
torrent = self.__data[filehash][0]
# ... random chunk
needed_chunks = self.__partial_torrents[filehash]
chunk = random.choice(list(needed_chunks))
chunk_key = self.__ChunkKey(torrent, chunk)
# ... random host
chunk_available_at = random.choice(self.__dht.Get(chunk_key))
# Ask the host to send the chunk of that torrent
self.__network.Send(self.__id, chunk_available_at, "give_" + chunk_key)
def __ProcessMessage(self, msg):
(from_id, contents) = msg
logging.debug("Node '%s' received a message '%s' from '%s'",
self.__id, contents, from_id)
m = re.match("give_chunk_([0-9a-f]+)_([0-9]+)", contents)
if m:
# Process "give_chunk_<hash>_<chunk>" message
(filehash, chunk_id) = m.groups()
if filehash not in self.__data.keys():
logging.error("Hey, I don't know such a torrent!")
return
chunk_id = int(chunk_id)
(torrent, contents) = self.__data[filehash]
if not torrent.IsKnownChunk(contents, chunk_id):
logging.error("Hey, I don't have this chunk!")
logging.info("Current torrent contents are:\n '%s'" % contents)
return
chunk_key = self.__ChunkKey(torrent, chunk_id)
chunk_data = TorrentID.GetChunkData(contents, chunk_id)
self.__network.Send(self.__id, from_id,
"take_%s %s" % (chunk_key, chunk_data))
return
m = re.match("take_chunk_([0-9a-f]+)_([0-9]+) (.*)", contents)
if not m:
logging.error("Couldn't parse this message '%s'", msg)
return
# Process "take_chunk_<hash>_<chunk> <contents>" message
(filehash, chunk_id, chunk_data) = m.groups()
chunk_id = int(chunk_id)
if filehash not in self.__partial_torrents.keys():
logging.info("Hey, I didn't want this torrent!")
return
needed_chunks = self.__partial_torrents[filehash]
(torrent, known_contents) = self.__data[filehash]
if chunk_id not in needed_chunks:
logging.warning("%s: Hey, I didn't want this chunk! %d not in %s" % (
self.__id, chunk_id, str(needed_chunks)))
logging.warning("Current torrent contents are:\n '%s'" % known_contents)
return
if torrent.GetChunkHash(chunk_data) != torrent.partial_hashes[chunk_id]:
logging.error("Hash mismatch!") # Call security?
return
known_contents = torrent.SetChunkData(known_contents, chunk_id, chunk_data)
self.__data[filehash] = (torrent, known_contents)
print "%s: New contents are '%s'" % (self.__id, known_contents)
needed_chunks.remove(chunk_id)
chunk_key = self.__ChunkKey(torrent, chunk_id)
self.__dht.Append(chunk_key, [self.__id])
if len(needed_chunks) == 0:
logging.info("Torrent #%s download finished!" % filehash)
self.__downloaded.append(filehash)
self.__partial_torrents.pop(filehash)
return
class TorrentDhtDemoTest(unittest.TestCase):
def runTest(self):
print # ugly hack to force a newline
myfile = "AAAABBBBCCCCDDDDEEEEFF"
mytorrent = TorrentID(myfile)
network = FakeP2PNetwork()
dht = FakeDHT()
clients = []
for i in range(3):
clients.append(TorrentClient(network, dht))
if i == 0: # Seeder
clients[i].AddTorrent(mytorrent, myfile)
else: # others
clients[i].AddTorrent(mytorrent)
received_file = None
while not received_file:
for c in clients:
c.Tick() # Simulate parallel execution
received_file = clients[1].GetDownloadedTorrent()
self.assertEqual(received_file[0], mytorrent)
self.assertEqual(received_file[1], myfile)
# Run the test suite.
if __name__ == '__main__':
# replace ERROR with INFO, DEBUG, etc. and re-run. Notice any changes?
logging.basicConfig(stream=sys.stdout,
level=logging.ERROR, # Don't print anything less serious
format="%(asctime)s [%(levelname)s] %(message)s")
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| bsd-3-clause | 1,982,451,436,412,792,600 | 33.205575 | 81 | 0.640522 | false |
GoogleCloudPlatform/datacatalog-connectors-bi | google-datacatalog-sisense-connector/src/google/datacatalog_connectors/sisense/prepare/datacatalog_tag_template_factory.py | 1 | 12677 | #!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import datacatalog
from google.cloud.datacatalog import TagTemplate
from google.datacatalog_connectors.commons import prepare
from google.datacatalog_connectors.sisense.prepare import constants
class DataCatalogTagTemplateFactory(prepare.BaseTagTemplateFactory):
__BOOL_TYPE = datacatalog.FieldType.PrimitiveType.BOOL
__DOUBLE_TYPE = datacatalog.FieldType.PrimitiveType.DOUBLE
__STRING_TYPE = datacatalog.FieldType.PrimitiveType.STRING
__TIMESTAMP_TYPE = datacatalog.FieldType.PrimitiveType.TIMESTAMP
def __init__(self, project_id: str, location_id: str):
self.__project_id = project_id
self.__location_id = location_id
def make_tag_template_for_dashboard(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_DASHBOARD)
tag_template.display_name = 'Sisense Dashboard Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='folder_id',
field_type=self.__STRING_TYPE,
display_name='Folder Id',
order=7)
self._add_primitive_type_field(tag_template=tag_template,
field_id='folder_name',
field_type=self.__STRING_TYPE,
display_name='Folder Name',
order=6)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='folder_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the Folder',
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='datasource',
field_type=self.__STRING_TYPE,
display_name='Data Source',
order=4)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='last_publish',
field_type=self.__TIMESTAMP_TYPE,
display_name='Time it was last published',
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='last_opened',
field_type=self.__TIMESTAMP_TYPE,
display_name='Time it was last opened',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
def make_tag_template_for_folder(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_FOLDER)
tag_template.display_name = 'Sisense Folder Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=11)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='parent_id',
field_type=self.__STRING_TYPE,
display_name='Id of Parent',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='parent_name',
field_type=self.__STRING_TYPE,
display_name='Parent Folder',
order=7)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='parent_folder_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the parent Folder',
order=6)
self._add_primitive_type_field(tag_template=tag_template,
field_id='has_children',
field_type=self.__BOOL_TYPE,
display_name='Has children',
is_required=True,
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='child_count',
field_type=self.__DOUBLE_TYPE,
display_name='Child count',
order=4)
self._add_primitive_type_field(tag_template=tag_template,
field_id='has_dashboards',
field_type=self.__BOOL_TYPE,
display_name='Has dashboards',
is_required=True,
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_count',
field_type=self.__DOUBLE_TYPE,
display_name='Dashboard count',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
def make_tag_template_for_widget(self) -> TagTemplate:
tag_template = datacatalog.TagTemplate()
tag_template.name = datacatalog.DataCatalogClient.tag_template_path(
project=self.__project_id,
location=self.__location_id,
tag_template=constants.TAG_TEMPLATE_ID_WIDGET)
tag_template.display_name = 'Sisense Widget Metadata'
self._add_primitive_type_field(tag_template=tag_template,
field_id='id',
field_type=self.__STRING_TYPE,
display_name='Id',
is_required=True,
order=10)
self._add_primitive_type_field(tag_template=tag_template,
field_id='type',
field_type=self.__STRING_TYPE,
display_name='Type',
is_required=True,
order=9)
self._add_primitive_type_field(tag_template=tag_template,
field_id='subtype',
field_type=self.__STRING_TYPE,
display_name='Subtype',
order=8)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_username',
field_type=self.__STRING_TYPE,
display_name='Owner username',
order=7)
self._add_primitive_type_field(tag_template=tag_template,
field_id='owner_name',
field_type=self.__STRING_TYPE,
display_name='Owner name',
order=6)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_id',
field_type=self.__STRING_TYPE,
display_name='Dashboard Id',
is_required=True,
order=5)
self._add_primitive_type_field(tag_template=tag_template,
field_id='dashboard_title',
field_type=self.__STRING_TYPE,
display_name='Dashboard Title',
is_required=True,
order=4)
self._add_primitive_type_field(
tag_template=tag_template,
field_id='dashboard_entry',
field_type=self.__STRING_TYPE,
display_name='Data Catalog Entry for the Dashboard',
is_required=True,
order=3)
self._add_primitive_type_field(tag_template=tag_template,
field_id='datasource',
field_type=self.__STRING_TYPE,
display_name='Data Source',
order=2)
self._add_primitive_type_field(tag_template=tag_template,
field_id='server_url',
field_type=self.__STRING_TYPE,
display_name='Sisense Server Url',
is_required=True,
order=1)
return tag_template
| apache-2.0 | 1,877,983,408,668,116,500 | 45.951852 | 78 | 0.441587 | false |
schettino72/serveronduty | websod/database.py | 1 | 1071 | import os
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
metadata = MetaData()
def get_sa_db_uri(driver='', username='', password='', host='', port='', database=''):
"""get SQLAlchemy DB URI: driver://username:password@host:port/database"""
assert driver
if driver == 'sqlite':
# get absolute file path
if not database.startswith('/'):
db_file = os.path.abspath(database)
else:
db_file = database
db_uri = '%s:///%s' % (driver, db_file)
else:
db_uri = ('%s://%s:%s@%s:%s/%s' %
(driver, username, password, host, port, database))
return db_uri
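# A couple of illustrative results (assumed values, not taken from a real config):
#
#   get_sa_db_uri(driver='sqlite', database='db/serveronduty.db')
#       -> 'sqlite:////abs/path/to/db/serveronduty.db'
#   get_sa_db_uri(driver='postgresql', username='sod', password='secret',
#                 host='localhost', port='5432', database='sod')
#       -> 'postgresql://sod:secret@localhost:5432/sod'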
class DB(object):
def __init__(self, db_uri):
self.engine = create_engine(db_uri, convert_unicode=True)
self.session = scoped_session(
sessionmaker(autocommit=False,
autoflush=False,
bind=self.engine))
def init_database(self):
metadata.create_all(bind=self.engine)
| mit | 8,379,561,955,119,394,000 | 27.184211 | 86 | 0.577031 | false |
kyrsjo/AcdOpti | src/acdOpti/AcdOptiRunner.py | 1 | 23813 | # -*- coding: utf8 -*-
#
# Copyright 2011 Kyrre Ness Sjøbæk
# This file is part of AcdOpti.
#
# AcdOpti is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcdOpti is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcdOpti. If not, see <http://www.gnu.org/licenses/>.
from AcdOptiFileParser import DataDict,\
AcdOptiFileParser_simple
from AcdOptiSettings import AcdOptiSettings
from AcdOptiExceptions import AcdOptiException_optiRunner_createFail,\
AcdOptiException_optiRunner_loadFail,\
AcdOptiException_optiRunner_remoteProblem,\
AcdOptiException_optiRunner_stageError,\
AcdOptiException_optiRunner_runError
import paramiko
from SSHspeedtransfer import SSHspeedtransfer
import os, math, re, tarfile
class AcdOptiRunner:
"""
Baseclass for runners, which handle uploading the job to HPC etc.
Dependent on being "owned" by an AcdOptiRunConfig.
"""
folder = None
runConfig = None
type = None
lockdown = None # Is the runner currently not writable? (not enforced)
def __init__(self):
raise NotImplementedError("Don't initialize the interface AcdOptiRunner")
def stage(self):
"Prepare all files etc. for running the job"
raise NotImplementedError
def isRemote(self):
"Returns True if this runner is using a remote resource"
raise NotImplementedError
def upload(self):
"""
If the job is remote, upload the staged data.
Raises an AcdOptiException_optiRunner_remoteProblem
if there is a problem, error message as argument.
"""
raise NotImplementedError
def run(self):
"Start a job with this configuration"
raise NotImplementedError
def remoteCleanup(self):
"Delete files belonging to this job on remote server"
raise NotImplementedError
def cancelRun(self):
"Stop a running job with this configuration"
raise NotImplementedError
def queryStatus(self):
"Query the status of this job, returning one of AcdOptiRunConfig.statuses"
raise NotImplementedError
def getRemoteData(self):
"""
If the job is remote, download the data.
Returns the path to the folder with the finished data.
"""
raise NotImplementedError
def refreshLockdown(self):
"""
Checks the status of the runConfig and uses that to determine the lockdown setting.
"""
if self.runConfig.status == "not_initialized" or self.runConfig.status == "initialized":
self.lockdown = False
else:
self.lockdown = True
@staticmethod
def getRunner(type, runConfig):
""""
Get a runner instance of the specified type.
Acts like a constructor constructing the correct type of runner
"""
folder = runConfig.folder
if not os.path.isdir(os.path.join(folder, "stage")):
raise AcdOptiException_optiRunner_loadFail("Missing subfolder 'stage'")
if type.split("::")[0] == "Hopper":
return AcdOptiRunner_Hopper(runConfig)
else:
raise AcdOptiException_optiRunner_loadFail("Unknown type '" + type +"'")
def cloneInto(self,cloneFrom):
"""
Empty this runner and copy the data from cloneFrom.
"""
raise NotImplementedError
@staticmethod
def createNew(type, folder):
"""
Create a new runner instance of the specified type in folder.
Allows sending options to the subclass createNew()s by the syntax
Name::Options
"""
if type.split("::")[0] == "Hopper":
AcdOptiRunner_Hopper.createNew(folder, type)
else:
raise AcdOptiException_optiRunner_createFail("Unkown type '" + type + "'")
class AcdOptiRunner_Hopper(AcdOptiRunner):
type = "Hopper"
CPUsPerNode = 24
hostname = "hopper.nersc.gov"
#commonExecs = {"Omega3P::2011May23":"~candel/.community/hopper2/omega3p-2011May23"}
__sshClient = None #Try to keep using the same SSHClient
__paramFile = None
#PBSjobName = None
remoteJobID = None
speedSSH = None
def __init__(self,runConfig):
self.runConfig = runConfig
self.folder = self.runConfig.folder
self.__paramFile = AcdOptiFileParser_simple(os.path.join(self.folder,"paramFile_acdOptiRunner_Hopper.set"), 'rw')
if not self.__paramFile.dataDict["fileID"] == "AcdOptiRunner_Hopper":
raise AcdOptiException_optiRunner_loadFail("Wrong fileID, got'"+self.__paramFile.dataDict["fileID"] + "'")
self.remoteJobID = self.__paramFile.dataDict["remoteJobID"]
if self.remoteJobID == "":
self.remoteJobID = None
if self.remoteJobID != None and not self.runConfig.status.startswith("remote::"):
print "WARNING: Found remoteJobID, but status='" + self.runConfig.status + "'"
#raise AcdOptiException_optiRunner_loadFail("Found remoteJobID, but status='" + self.runConfig.status + "'")
elif self.remoteJobID == None and (self.runConfig.status == "remote::queued" or self.runConfig.status == "remote::running"):
raise AcdOptiException_optiRunner_loadFail("Did not find remoteJobID, but status='" + self.runConfig.status + "'")
self.speedSSH = SSHspeedtransfer(self.hostname, AcdOptiSettings().getSetting("hopperUser"))
def getTorqueMeta(self):
"Returns a pointer to the TorqueMeta data structure"
return self.__paramFile.dataDict["TorqueMeta"]
def getJobs(self):
"Return a pointer to the jobs data structure"
return self.__paramFile.dataDict["jobs"]
def isRemote(self):
return True
def __connectSSH(self):
"""
Method that setups a ssh connection, returning a paramiko.SSHClient object.
Please close() any SFTPClients opened, but don't close the client itself,
as this method tries to reuse an existing client (connecting is what takes most of the time)
"""
print "Connecting..."
if self.__sshClient != None:
print "Found an old client"
if self.__sshClient.get_transport() == None or self.__sshClient.get_transport().is_active() != True:
print "Old client not active."
else:
"Old client OK!"
return self.__sshClient
print "Couldn't use old client, creating a new one."
username = AcdOptiSettings().getSetting("hopperUser")
client = paramiko.SSHClient()
client.load_host_keys(os.path.expanduser(os.path.join("~", ".ssh", "known_hosts")))
client.connect(self.hostname, username=username)
#client.load_system_host_keys()
print "Connected."
self.__sshClient = client
return client
def __SSHkillDir(self, dir, sftp):
"""
Recursively delete directory dir and its contents using a sftp connection
"""
#Emptying the folder...
iDir = ""
if dir[-1] == "/":
iDir = dir[:-1]
else:
iDir = dir
fileList = sftp.listdir(iDir)
for file in fileList:
try:
sftp.remove(iDir + "/" + file)
except IOError:
#Directory
self.__SSHkillDir(iDir + "/" + file, sftp)
sftp.rmdir(iDir)
def upload(self):
stageFile = self.runConfig.stageFile
#Check for programming errors
assert stageFile != None
assert os.path.isfile(stageFile)
#Setup the ssh connection
username = AcdOptiSettings().getSetting("hopperUser")
client = self.__connectSSH()
#Look for the acdopti scratch directory
sftp = client.open_sftp()
remoteDir = "/scratch/scratchdirs/" + username + "/"
remoteScratch = remoteDir + "acdopti_scratch/"
remoteDirList = sftp.listdir(remoteDir)
if not "acdopti_scratch" in remoteDirList:
print "Making directory..."
print remoteDirList
sftp.mkdir(remoteScratch)
scratchDirList = sftp.listdir(remoteScratch)
#Put the file
if os.path.split(stageFile)[1] in scratchDirList:
print "File already on HPC?!?"
#client.close()
sftp.close()
return
print "Uploading file..."
remoteFile = remoteScratch + os.path.split(stageFile)[1]
self.speedSSH.put(stageFile, remoteScratch + os.path.split(stageFile)[1])
#sftp.put(stageFile, remoteScratch + os.path.split(stageFile)[1])
print "Uploading finished."
#Unzip
print "Unzipping..."
print "COMMAND:", "cd " + remoteScratch +"; tar xzvf " + remoteFile # + " --directory " + remoteScratch
(ssh_stdin, ssh_stdout, ssh_stderr) = client.exec_command("cd " + remoteScratch +"; tar xzvf " + remoteFile) #+ " --directory " + remoteScratch)
(ssh_stdout_str, ssh_stderr_str) = (ssh_stdout.read(), ssh_stderr.read())
print "STDOUT:", ssh_stdout_str
print "STDERR:", ssh_stderr_str
print "Unzipped."
if len(ssh_stderr_str):
#client.close()
sftp.close()
raise AcdOptiException_optiRunner_remoteProblem("Problem while unzipping, see output")
#Delete the remote tar.gz
print "Deleting tar.gz..."
dirList = sftp.listdir(remoteScratch)
if os.path.split(self.runConfig.stageFile)[1] in dirList:
sftp.remove(remoteFile)
print "Deleted."
else:
print "Already gone."
sftp.close()
#client.close()
def remoteCleanup(self):
assert self.runConfig.status in ["remote::uploaded", "remote::finished", "remote::unclean"]
#Make connection...
username = AcdOptiSettings().getSetting("hopperUser")
client = self.__connectSSH()
sftp = client.open_sftp()
remoteDir = "/scratch/scratchdirs/" + username + "/"
remoteScratch = remoteDir + "acdopti_scratch/"
remoteFile = remoteScratch + os.path.split(self.runConfig.stageFile)[1]
remoteFinishedFile = remoteScratch + self.runConfig.stageName + "--finished.tar.gz"
#Delete the remote tar.gz's
print "Deleting remote stage tar.gz..."
dirList = sftp.listdir(remoteScratch)
if os.path.split(self.runConfig.stageFile)[1] in dirList:
sftp.remove(remoteFile)
print "Deleted."
else:
print "Already gone."
print "Deleting remote finished tar.gz..."
if os.path.split(remoteFinishedFile)[1] in dirList:
sftp.remove(remoteFinishedFile)
print "Deleted."
else:
print "Already gone."
#Delete the remote folder
print "Deleting remote folder..."
if self.runConfig.stageName in dirList:
self.__SSHkillDir(remoteScratch + "/" + self.runConfig.stageName, sftp)
else:
print "Already gone."
sftp.close()
client.close()
def run(self):
#Make connection...
username = AcdOptiSettings().getSetting("hopperUser")
client = self.__connectSSH()
remoteDir = "/scratch/scratchdirs/" + username + "/"
remoteScratch = remoteDir + "acdopti_scratch/"
#Submit job
print "Submitting..."
(ssh_stdin, ssh_stdout, ssh_stderr) = client.exec_command("cd " + remoteScratch + self.runConfig.stageName + "; qsub " + remoteScratch + self.runConfig.stageName + "/run.pbs")
(ssh_stdout_str, ssh_stderr_str) = (ssh_stdout.read(), ssh_stderr.read())
print "STDOUT:", ssh_stdout_str
print "STDERR:", ssh_stderr_str
print "Submitted."
#client.close()
if len(ssh_stderr_str):
raise AcdOptiException_optiRunner_remoteProblem("Problem during submission, see output")
#Check if the stdout matches XXXXX.YYY, where XXXXX is a number, and YYY is letters.
# This is then the job ID.
if re.match("[0-9]+\.\w+$", ssh_stdout_str):
self.remoteJobID = ssh_stdout_str.strip()
self.__paramFile.dataDict.setValSingle("remoteJobID", self.remoteJobID)
print "Submission successful, JobID='" + self.remoteJobID + "'"
else:
raise AcdOptiException_optiRunner_runError("Problem with job submission, see standard output")
self.write()
def cancelRun(self):
assert self.remoteJobID != None
#Make connection...
client = self.__connectSSH()
#Cancel the current job
print "Issuing cancel command..."
(ssh_stdin, ssh_stdout, ssh_stderr) = client.exec_command("qdel " + self.remoteJobID)
(ssh_stdout_str, ssh_stderr_str) = (ssh_stdout.read(), ssh_stderr.read())
print "STDOUT:", ssh_stdout_str
print "STDERR:", ssh_stderr_str
print "Cancel command issued."
#client.close()
if len(ssh_stderr_str):
if "Unknown Job Id " + self.remoteJobID in ssh_stderr_str:
#Aready finished
print "Job was already finished"
self.remoteJobID = None
self.write()
return
elif "errno=15096" in ssh_stderr_str:
#Something went wrong with TORQUE on the server - but the job is very much dead...
print "Torque problem, but job is dead. (see output)"
self.remoteJobID = None
self.write()
return
raise AcdOptiException_optiRunner_remoteProblem("Problem during cancel, see output")
self.write()
def queryStatus(self):
assert self.runConfig.status == "remote::queued" or self.runConfig.status == "remote::running", "status = '" + self.runConfig.status + "'"
#Make connection
client = self.__connectSSH()
print "Getting status..."
(ssh_stdin, ssh_stdout, ssh_stderr) = client.exec_command("qstat " + self.remoteJobID)
(ssh_stdout_str, ssh_stderr_str) = (ssh_stdout.read(), ssh_stderr.read())
print "STDOUT:", ssh_stdout_str
print "STDERR:", ssh_stderr_str
print "Got status."
#client.close()
#Parse the status output:
if len(ssh_stderr_str):
if "Unknown Job Id Error " + self.remoteJobID in ssh_stderr_str:
self.remoteJobID = None
self.write()
return "remote::finished"
raise AcdOptiException_optiRunner_remoteProblem("Problem while getting status, see output")
statusline = ""
for line in ssh_stdout_str.splitlines():
if line.startswith(self.remoteJobID):
statusline = line
break
statusChar = statusline.split()[-2]
print "statusLine='" + statusline + "', statusChar='" + statusChar + "'"
if statusChar == "Q":
return "remote::queued"
elif statusChar == "R" or statusChar == "E" or statusChar == "T": #E: Exiting after having run, T: transfer
return "remote::running"
elif statusChar == "C":
self.remoteJobID = None
self.write()
return "remote::finished"
else:
raise ValueError("Unknown status char '" + statusChar + "'")
def getRemoteData(self):
assert self.runConfig.status=="remote::finished" or self.runConfig.status=="remote::unclean"
#assert self.remoteJobID == None
if self.remoteJobID != None and not self.runConfig.status.startswith("remote::"):
print "WARNING: Found remoteJobID, but status='" + self.runConfig.status + "'"
finishedLocalPath=os.path.join(self.folder, "finished")
username = AcdOptiSettings().getSetting("hopperUser")
remoteDir = "/scratch/scratchdirs/" + username + "/"
remoteScratch = remoteDir + "acdopti_scratch/"
#remoteJobDir = remoteScratch + self.runConfig.stageName + "/"
remoteFile = self.runConfig.stageName + "--finished.tar.gz"
#Make connection
client = self.__connectSSH()
#sftp = client.open_sftp()
#Tar the data
print "Zipping..."
command = "cd " + remoteScratch +"; tar czvf " + remoteFile + " --force-local " + self.runConfig.stageName
print "COMMAND:", command
(ssh_stdin, ssh_stdout, ssh_stderr) = client.exec_command(command)
(ssh_stdout_str, ssh_stderr_str) = (ssh_stdout.read(), ssh_stderr.read())
print "STDOUT:", ssh_stdout_str
print "STDERR:", ssh_stderr_str
print "Zipped."
if len(ssh_stderr_str):
#client.close()
raise AcdOptiException_optiRunner_remoteProblem("Problem during zipping, see output")
#Download the tarball
self.speedSSH.get(remoteScratch + remoteFile, os.path.join(finishedLocalPath, remoteFile))
#sftp.get(remoteScratch + remoteFile, os.path.join(finishedLocalPath, remoteFile)) #TOO SLOW over transatlantic link...
#sftp.close()
#client.close()
#Unzip the downloaded solution tar.gz
archive = tarfile.open(os.path.join(finishedLocalPath, remoteFile), "r:gz")
archive.extractall(path=finishedLocalPath)
return os.path.join(finishedLocalPath, self.runConfig.stageName) #Duplicated code in runConfig::init()!
def stage(self):
self.__makePBS()
def __makePBS(self):
"""
Creates the run.pbs file used by Hopper's batch system
"""
#Calculate number of mpi nodes needed, and build the commands
jobs = self.__paramFile.dataDict["jobs"]
commands = []
numNodes = 0
for jobName, job in jobs:
command = None
if DataDict.boolconv(job["aprun"]):
if job["tasksNode"] == "-1":
nodesThis = int(math.ceil(int(job["tasks"])/float(self.CPUsPerNode)))
else:
assert int(job["tasksNode"]) <= self.CPUsPerNode
nodesThis = int(math.ceil(int(job["tasks"])/float(job["tasksNode"])))
if nodesThis > numNodes:
numNodes = nodesThis
def makeOption(optionName, key, optional):
get = job[key]
if get == "-1" and optional:
return ""
return optionName + " " + get + " "
command = "aprun " + makeOption("-n", "tasks", False)\
+ makeOption("-N", "tasksNode", True)\
+ makeOption("-S", "tasksNuma", True)\
+ job["command"] + " " + job["commandArgs"]
else:
command = job["command"]
commands.append(command)
if len(commands) == 0:
raise AcdOptiException_optiRunner_stageError("No commands built")
if not numNodes > 0:
raise AcdOptiException_optiRunner_stageError("Got numNodes="+str(numNodes))
#Write PBS file header
runpbs = open(os.path.join(self.runConfig.stageFolder, "run.pbs"), 'w')
runpbs.write("#!/bin/bash\n")
torqueMeta = self.getTorqueMeta()
runpbs.write("#PBS -q " + torqueMeta["queue"] + "\n")
runpbs.write("#PBS -l mppwidth=" + str(numNodes*self.CPUsPerNode) + "\n")
runpbs.write("#PBS -l walltime=" + torqueMeta["walltime"] + "\n")
runpbs.write("#PBS -N " + self.runConfig.stageName + "\n")
runpbs.write("#PBS -A " + torqueMeta["repo"] + "\n")
if DataDict.boolconv(torqueMeta["importVars"]):
runpbs.write("#PBS -V\n")
runpbs.write("\n\n")
#Write PBS script
runpbs.write("## Commands:\n")
for command in commands:
runpbs.write(command + "\n")
runpbs.close()
#Move it to stage folder
#os.rename(os.path.join(self.folder, "run.pbs"), os.path.join(self.folder, "stage", "run.pbs"))
def __del__(self):
if self.__sshClient != None:
self.__sshClient.close()
def write(self):
self.__paramFile.write()
def cloneInto(self,cloneFrom):
"""
Empty this runner and copy the data from cloneFrom.
"""
#Copy the torque stuff
torqueOriginal = cloneFrom.getTorqueMeta()
torqueThis = self.getTorqueMeta()
torqueThis.clear()
for (k,v) in torqueOriginal.copy():
torqueThis.pushBack(k,v)
#Copy the jobs stuff
jobsOriginal = cloneFrom.getJobs()
jobsThis = self.getJobs()
jobsThis.clear()
for (k,v) in jobsOriginal.copy():
jobsThis.pushBack(k,v)
self.write()
@staticmethod
def createNew(folder, type):
#Create the settings file
paramFile = AcdOptiFileParser_simple(os.path.join(folder,"paramFile_acdOptiRunner_Hopper.set"),"w")
paramFile.dataDict.pushBack("fileID", "AcdOptiRunner_Hopper")
paramFile.dataDict.pushBack("remoteJobID", "")
#Set default torque meta stuff
torqueMeta = paramFile.dataDict.pushBack("TorqueMeta", DataDict())
torqueMeta.pushBack("queue", "regular")
torqueMeta.pushBack("walltime", "00:59:00")
torqueMeta.pushBack("repo", "m349")
torqueMeta.pushBack("importVars", "True")
#Create a datastructure for storing aprun jobs
jobs = paramFile.dataDict.pushBack("jobs", DataDict())
# Each aprun job has the following fields:
# - aprun: Boolean, true in the case of aprun jobs
# - command: Command to run
# - commandArgs: Arguments to pass to the executable (such as name of input file)
# - tasks: Number of MPI tasks, -n. Essential!
# - tasksNode: Number of MPI tasks pr. node, -N. Optional.
# - tasksNuma: Number of MPI tasks pr. NUMA node, -S. Optional.
# If the aprun flag is False, then only command is used (but all keys should be present!)
# Optional args should also be present. Set to "-1" to disable.
#This command is always needed.
cdpbs = jobs.pushBack("cdPBS", DataDict())
cdpbs.pushBack("aprun", "False")
cdpbs.pushBack("command", "cd $PBS_O_WORKDIR")
cdpbs.pushBack("commandArgs", "")
cdpbs.pushBack("tasks", "-1")
cdpbs.pushBack("tasksNode", "-1")
cdpbs.pushBack("tasksNuma", "-1")
paramFile.write()
| gpl-3.0 | 7,559,219,834,247,574,000 | 39.494898 | 183 | 0.585822 | false |
blinkseb/script.tv.betaseries | json.py | 1 | 10173 | import string
import types
## json.py implements a JSON (http://json.org) reader and writer.
## Copyright (C) 2005 Patrick D. Logan
## Contact mailto:[email protected]
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
class _StringGenerator(object):
def __init__(self, string):
self.string = string
self.index = -1
def peek(self):
i = self.index + 1
if i < len(self.string):
return self.string[i]
else:
return None
def next(self):
self.index += 1
if self.index < len(self.string):
return self.string[self.index]
else:
raise StopIteration
def all(self):
return self.string
class WriteException(Exception):
pass
class ReadException(Exception):
pass
class JsonReader(object):
hex_digits = {'A': 10,'B': 11,'C': 12,'D': 13,'E': 14,'F':15}
escapes = {'t':'\t','n':'\n','f':'\f','r':'\r','b':'\b'}
def read(self, s):
self._generator = _StringGenerator(s)
result = self._read()
return result
def _read(self):
self._eatWhitespace()
peek = self._peek()
if peek is None:
raise ReadException, "Nothing to read: '%s'" % self._generator.all()
if peek == '{':
return self._readObject()
elif peek == '[':
return self._readArray()
elif peek == '"':
return self._readString()
elif peek == '-' or peek.isdigit():
return self._readNumber()
elif peek == 't':
return self._readTrue()
elif peek == 'f':
return self._readFalse()
elif peek == 'n':
return self._readNull()
elif peek == '/':
self._readComment()
return self._read()
else:
raise ReadException, "Input is not valid JSON: '%s'" % self._generator.all()
def _readTrue(self):
self._assertNext('t', "true")
self._assertNext('r', "true")
self._assertNext('u', "true")
self._assertNext('e', "true")
return True
def _readFalse(self):
self._assertNext('f', "false")
self._assertNext('a', "false")
self._assertNext('l', "false")
self._assertNext('s', "false")
self._assertNext('e', "false")
return False
def _readNull(self):
self._assertNext('n', "null")
self._assertNext('u', "null")
self._assertNext('l', "null")
self._assertNext('l', "null")
return None
def _assertNext(self, ch, target):
if self._next() != ch:
raise ReadException, "Trying to read %s: '%s'" % (target, self._generator.all())
def _readNumber(self):
isfloat = False
result = self._next()
peek = self._peek()
while peek is not None and (peek.isdigit() or peek == "."):
isfloat = isfloat or peek == "."
result = result + self._next()
peek = self._peek()
try:
if isfloat:
return float(result)
else:
return int(result)
except ValueError:
raise ReadException, "Not a valid JSON number: '%s'" % result
def _readString(self):
result = ""
self._next()
try:
while self._peek() != '"':
ch = self._next()
if ch == "\\":
ch = self._next()
if ch in 'brnft':
ch = self.escapes[ch]
elif ch == "u":
ch4096 = self._next()
ch256 = self._next()
ch16 = self._next()
ch1 = self._next()
n = 4096 * self._hexDigitToInt(ch4096)
n += 256 * self._hexDigitToInt(ch256)
n += 16 * self._hexDigitToInt(ch16)
n += self._hexDigitToInt(ch1)
ch = unichr(n)
elif ch not in '"/\\':
raise ReadException, "Not a valid escaped JSON character: '%s' in %s" % (ch, self._generator.all())
result = result + ch
except StopIteration:
raise ReadException, "Not a valid JSON string: '%s'" % self._generator.all()
self._next()
return result
def _hexDigitToInt(self, ch):
try:
result = self.hex_digits[ch.upper()]
except KeyError:
try:
result = int(ch)
except ValueError:
raise ReadException, "The character %s is not a hex digit." % ch
return result
def _readComment(self):
self._next()
second = self._next()
if second == "/":
self._readDoubleSolidusComment()
elif second == '*':
self._readCStyleComment()
else:
raise ReadException, "Not a valid JSON comment: %s" % self._generator.all()
def _readCStyleComment(self):
try:
done = False
while not done:
ch = self._next()
done = (ch == "*" and self._peek() == "/")
if not done and ch == "/" and self._peek() == "*":
raise ReadException, "Not a valid JSON comment: %s, '/*' cannot be embedded in the comment." % self._generator.all()
self._next()
except StopIteration:
raise ReadException, "Not a valid JSON comment: %s, expected */" % self._generator.all()
def _readDoubleSolidusComment(self):
try:
ch = self._next()
while ch != "\r" and ch != "\n":
ch = self._next()
except StopIteration:
pass
def _readArray(self):
result = []
self._next()
done = self._peek() == ']'
while not done:
item = self._read()
result.append(item)
self._eatWhitespace()
done = self._peek() == ']'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
self._next()
return result
def _readObject(self):
result = {}
a = self._next()
assert a == '{'
done = self._peek() == '}'
while not done:
key = self._read()
if type(key) is not types.StringType:
raise ReadException, "Not a valid JSON object key (should be a string): %s" % key
self._eatWhitespace()
ch = self._next()
if ch != ":":
raise ReadException, "Not a valid JSON object: '%s' due to: '%s'" % (self._generator.all(), ch)
self._eatWhitespace()
val = self._read()
result[key] = val
self._eatWhitespace()
done = self._peek() == '}'
if not done:
ch = self._next()
if ch != ",":
raise ReadException, "Not a valid JSON array: '%s' due to: '%s'" % (self._generator.all(), ch)
self._next()
return result
def _eatWhitespace(self):
p = self._peek()
while p is not None and p in string.whitespace or p == '/':
if p == '/':
self._readComment()
else:
self._next()
p = self._peek()
def _peek(self):
return self._generator.peek()
def _next(self):
return self._generator.next()
class JsonWriter(object):
def _append(self, s):
self._results.append(s)
def write(self, obj, escaped_forward_slash=False):
self._escaped_forward_slash = escaped_forward_slash
self._results = []
self._write(obj)
return "".join(self._results)
def _write(self, obj):
ty = type(obj)
if ty is types.DictType:
n = len(obj)
self._append("{")
for k, v in obj.items():
self._write(k)
self._append(":")
self._write(v)
n = n - 1
if n > 0:
self._append(",")
self._append("}")
elif ty is types.ListType or ty is types.TupleType:
n = len(obj)
self._append("[")
for item in obj:
self._write(item)
n = n - 1
if n > 0:
self._append(",")
self._append("]")
elif ty is types.StringType or ty is types.UnicodeType:
self._append('"')
obj = obj.replace('\\', r'\\')
if self._escaped_forward_slash:
obj = obj.replace('/', r'\/')
obj = obj.replace('"', r'\"')
obj = obj.replace('\b', r'\b')
obj = obj.replace('\f', r'\f')
obj = obj.replace('\n', r'\n')
obj = obj.replace('\r', r'\r')
obj = obj.replace('\t', r'\t')
self._append(obj)
self._append('"')
elif ty is types.IntType or ty is types.LongType:
self._append(str(obj))
elif ty is types.FloatType:
self._append("%f" % obj)
elif obj is True:
self._append("true")
elif obj is False:
self._append("false")
elif obj is None:
self._append("null")
else:
raise WriteException, "Cannot write in JSON: %s" % repr(obj)
def write(obj, escaped_forward_slash=False):
return JsonWriter().write(obj, escaped_forward_slash)
def read(s):
return JsonReader().read(s)
| gpl-2.0 | -2,107,853,004,309,323,000 | 31.710611 | 136 | 0.505751 | false |
datamade/rlr | rlr/crossvalidation.py | 1 | 4227 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from builtins import range
import numpy
import logging
import warnings
import collections
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def gridSearch(examples,
labels,
learner,
num_cores,
k=3,
search_space=[.00001, .0001, .001, .01, .1, 1],
randomize=True):
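    # Repeated k-fold cross-validation over the alpha grid: each candidate alpha
    # is scored on held-out folds and the best average score wins.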
if num_cores < 2 :
from multiprocessing.dummy import Pool
else :
from .backport import Pool
repeats = max(1, int(150/len(labels)))
pool = Pool()
logger.info('using cross validation to find optimum alpha...')
alpha_tester = AlphaTester(learner)
alpha_scores = collections.defaultdict(list)
for repeat in range(repeats):
permutation = numpy.random.permutation(labels.size)
examples = examples[permutation]
labels = labels[permutation]
labeled_examples = (examples, labels)
for alpha in search_space:
score_jobs = [pool.apply_async(alpha_tester,
(subset, validation, alpha))
for subset, validation in
kFolds(labeled_examples, k)]
scores = [job.get() for job in score_jobs]
alpha_scores[alpha].extend(scores)
best_alpha, score = max(alpha_scores.items(),
key=lambda x: reduceScores(x[1]))
logger.info('optimum alpha: %f, score %s' % (best_alpha, reduceScores(score)))
pool.close()
pool.join()
return best_alpha
# http://code.activestate.com/recipes/521906-k-fold-cross-validation-partition/
def kFolds(labeled_examples, k):
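    # Yield k (training, validation) splits; fold i uses every k-th row,
    # starting at offset i, as the validation set.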
examples, labels = labeled_examples
if k < 2 :
raise ValueError("Number of folds must be at least 2")
if len(labels) < 2 :
raise ValueError("At least two training datum are required")
for i in range(k):
selected_indices = range(i, examples.shape[0], k)
validation = (examples[selected_indices, :],
labels[selected_indices])
training = (numpy.delete(examples, selected_indices, axis=0),
numpy.delete(labels, selected_indices))
if len(training[1]) and len(validation[1]) :
yield (training, validation)
else :
warnings.warn("Only providing %s folds out of %s requested" %
(i, k))
break
class AlphaTester(object) :
def __init__(self, learner) : # pragma : no cover
self.learner = learner
def __call__(self, training, validation, alpha) :
training_examples, training_labels = training
self.learner.alpha = alpha
self.learner.fit_alpha(training_examples, training_labels, None)
validation_examples, validation_labels = validation
predictions = self.learner.predict_proba(validation_examples)
return scorePredictions(validation_labels, predictions)
def scorePredictions(true_labels, predictions) :
# http://en.wikipedia.org/wiki/Matthews_correlation_coefficient
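    # The four counts below form the confusion matrix at the 0.5 threshold:
    # true_dupes=TP, false_dupes=FP, true_distinct=TN, false_distinct=FN.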
true_dupes = int(numpy.sum(predictions[true_labels == 1] > 0.5))
false_dupes = int(numpy.sum(predictions[true_labels == 0] > 0.5))
true_distinct = int(numpy.sum(predictions[true_labels == 0] <= 0.5))
false_distinct = int(numpy.sum(predictions[true_labels == 1] <= 0.5))
if not (true_dupes + false_dupes) * (true_distinct + false_distinct) :
return 0
matthews_cc = ((true_dupes * true_distinct
- false_dupes * false_distinct)
/numpy.sqrt((true_dupes + false_dupes)
* (true_dupes + false_distinct)
* (true_distinct + false_dupes)
* (true_distinct + false_distinct)))
return matthews_cc
def reduceScores(scores) :
scores = [score for score in scores
if score is not None and not numpy.isnan(score)]
if scores :
average_score = sum(scores)/len(scores)
else :
average_score = 0
return average_score
| bsd-3-clause | 3,518,832,934,487,426,000 | 29.410072 | 82 | 0.588597 | false |
podbregarb/prvi-projekt | prop.py | 1 | 48496 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
# Compatibility between Python 2 and Python 3
try:
basestring
except NameError:
basestring = str
# Should the lists of conjuncts and disjuncts be sorted?
# Set to list for no sorting
# Set to sorted for sorting
sortSet = sorted
def paren(s, level, expl):
"""Postavi oklepaje okoli izraza.
Vrne niz s, ko je level <= expl, niz s, obdan z oklepaji, sicer.
Argumenti:
s -- niz za izpis
level -- nivo postavljanja oklepajev
exp -- najmanjša vrednost argumenta level, da se izpišejo oklepaji
"""
return s if level <= expl else '('+s+')'
def isLiteral(s):
"""Ugotovi, ali je s niz, ki predstavlja logično spremenljivko.
Argument:
s -- ime spremenljivke
"""
return isinstance(s, basestring) and re.match(r'^[a-z][a-z0-9]*$', s)
def nnf(f):
"""Vrne izraz f v negacijski normalni obliki, torej brez implikacij
in z negacijami samo neposredno na spremenljivkah.
Argument:
f -- logični izraz
"""
return f.simplify()
def cnf(f):
"""Vrne izraz f v konjunktivni normalni obliki, torej kot konjunkcijo
enega ali več disjunkcij spremenljivk in njihovih negacij.
Argument:
f -- logični izraz
"""
return f.flatten().cnf()
def dnf(f):
"""Vrne izraz f v disjunktivni normalni obliki, torej kot disjunkcijo
enega ali več konjunkcij spremenljivk in njihovih negacij.
Argument:
f -- logični izraz
"""
return f.flatten().dnf()
def getValues(d, root=None, p=None):
"""Vrne prireditve vrednosti spremenljivkam.
Če katera od spremenljivk nima vrednosti, vrne None. V nasprotnem primeru
prireditve vrne v obliki slovarja.
Argumenta:
d -- slovar podizrazov
root -- koren grafa
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if root != None:
if not root.getSure(p):
return root
val = {k.p: v.getValue(p) for (k, v) in d.items() if isinstance(k, Literal)}
if root == None and None in val.values():
return None
else:
return {k: v for (k, v) in val.items() if v != None}
def sat(f, d=None, root=False, trace=False):
"""Poskusi določiti izpolnljivost logične formule f s pomočjo linearnega
algoritma.
Če ugotovi, da formula ni izpolnljiva, vrne False.
Če najde prireditev vrednosti spremenljivkam, da je formula izpolnljiva,
jo vrne v obliki slovarja.
Če ne ugotovi, ali je formula izpolnljiva, vrne None.
Argumenti:
f -- logični izraz
d -- slovar podizrazov, privzeto None (naredi nov slovar)
root -- ali naj se vrne koren grafa v primeru neodločenosti
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if not type(d) == dict:
d = {}
n = f.simplify().ncf().node(d)
if not n.valuate(True, (None, 0), None, trace):
return False
out = getValues(d, n)
if not root and type(out) != dict:
return None
else:
return out
def sat3(f, d=None, root=False, trace=False):
"""Poskusi določiti izpolnljivost logične formule f s pomočjo kubičnega
algoritma.
Če ugotovi, da formula ni izpolnljiva, vrne False.
Če najde prireditev vrednosti spremenljivkam, da je formula izpolnljiva,
jo vrne v obliki slovarja.
Če ne ugotovi, ali je formula izpolnljiva, vrne None.
Argumenti:
f -- logični izraz
d -- slovar podizrazov, privzeto None (naredi nov slovar)
root -- ali naj se vrne koren grafa v primeru neodločenosti
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if not type(d) == dict:
d = {}
rt = sat(f, d, True, trace)
if rt == False or type(rt) == dict:
return rt
next = sum([[(n, k) for k in range(n.numVariants()) if n.v[k] == None] for n in d.values()], [])
lt = len(next)
ln = lt+1
while lt < ln:
todo = next
next = []
for n, k in todo:
if n.v[k] != None:
continue
if trace > 1:
print("Trying to assign temporary values to %d:%s" % (k, n))
if n.valuate(True, (None, k), (True, k), trace):
s = getValues(d, rt, True)
if type(s) == dict:
return s
if n.valuate(False, (None, k), (False, k), trace):
s = getValues(d, rt, False)
if type(s) == dict:
return s
for nn in d.values():
nn.clearTemp()
else:
for nn in d.values():
for i in range(nn.numVariants()):
if nn.vt[i] != None:
nn.setValue(nn.vt[i], nn.ct[i], (None, i))
nn.clearTemp()
else:
for nn in d.values():
nn.clearTemp()
if n.valuate(False, (None, k), (None, k), trace):
s = getValues(d, rt)
if type(s) == dict:
return s
else:
return False
if n.v[k] != None:
next.append((n, k))
ln = lt
lt = len(next)
if root:
return rt
else:
        return None
def dpllStep(l, trace=False):
"""Korak metode DPLL.
Argumenta:
l -- seznam disjunktov
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
num = 1
out = []
while num > 0:
while num > 0:
literals = {}
next = []
for x in l:
if isinstance(x, Literal):
if x.p in literals and not literals[x.p]:
if trace:
print("Contradiction for literal %s" % x.p)
return False
else:
literals[x.p] = True
elif isinstance(x, Not):
if x.t.p in literals and literals[x.t.p]:
if trace:
print("Contradiction for literal %s" % x.p)
return False
else:
literals[x.t.p] = False
elif len(x.l) == 0:
if trace:
print("Empty disjunction found")
return False
elif not any([Not(y) in x.l for y in x.l if isinstance(y, Literal)]):
next.append(x)
num = len(literals)
out += literals.items()
l = [y for y in [x.apply(literals) for x in next] if not isinstance(y, And)]
if trace > 1:
print("Found %d literals: %s, simplified to %s" % (num, literals, l))
pure = {}
for d in l:
for x in d.l:
if isinstance(x, Literal):
pure[x.p] = None if (x.p in pure and pure[x.p] != True) else True
else:
pure[x.t.p] = None if (x.t.p in pure and pure[x.t.p] != False) else False
purs = [(k, v) for (k, v) in pure.items() if v != None]
num = len(purs)
out += purs
l = [y for y in [x.apply(dict(purs)) for x in l] if not isinstance(y, And)]
if trace > 1:
print("Found %d pures: %s, simplified to %s" % (num, purs, l))
if len(l) == 0:
return dict(out)
p = [k for (k, v) in pure.items() if v == None][0]
if trace:
print("Trying %s:T" % p)
true = dpllStep([y for y in [x.apply({p: True}) for x in l] if not isinstance(y, And)], trace)
if type(true) == dict:
        return dict(out + [(p, True)] + list(true.items()))
if trace:
print("Failed %s:T" % p)
print("Trying %s:F" % p)
false = dpllStep([y for y in [x.apply({p: False}) for x in l] if not isinstance(y, And)], trace)
if type(false) == dict:
        return dict(out + [(p, False)] + list(false.items()))
if trace:
print("Failed %s:F" % p)
return False
def dpll(f, trace=False):
"""Glavni program metode DPLL.
Argumenta:
f -- logični izraz
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
f = cnf(f)
if isinstance(f, And):
l = f.l
else:
l = [f]
return dpllStep(l, trace)
def test():
izrazi=[And('p','q'),Or('p','q'),Or('p',And('q','p')),And(Or(Not('p'),'q'),'p'),And(Or('p','q'),Or('p','r')),And(And('p','q'),And('q','r'),And('r','p')),And(Or('p','q'),Or('q','r'),Or('r','p'),Not(And('p','q')),Not(And('q','r')),Not(And('r','p')))]
for i in izrazi:
print(i)
print(dpll(i))
def abbrev(p, s=None):
"""Vrne okrajšano obliko opisa stanja valuacije.
Argumenta:
p -- objekt za krajšanje
s -- zagotovilo, privzeto None
"""
if type(p) == tuple:
return '(%s,%d)' % (abbrev(p[0]), p[1])
elif type(p) == list:
return '[%s]' % ''.join([abbrev(x, s[i]) for i, x in enumerate(p)])
elif p == True:
return 'T' if s else 't'
elif p == False:
return 'F' if s else 'f'
else:
return 'N' if s else 'n'
class DAGNode:
"""Abstraktni razred vozlišča v usmerjenem acikličnem grafu (DAG).
Metode:
__init__ -- konstruktor
__repr__ -- znakovna predstavitev
init -- inicializacija
getValue -- vrne ustrezno trenutno vrednost
setValue -- nastavi ustrezno trenutno vrednost
getSure -- ali vrednosti otrok zagotavljajo trenutno vrednost
setSure -- nastavi zagotovilo o trenutni vrednosti
clearTemp -- pobriše začasne oznake
numVariants -- število variant podizrazov, ki jih je treba preveriti
valuate -- valuacija v dano logično vrednost
parents -- posodobitev stanja staršev
update -- posodobitev po spremembi stanja enega od otrok
Spremenljivke:
a -- seznam prednikov
v -- trenutno znane vrednosti izraza
vt -- začasne vrednosti ob predpostavki o veljavnosti začetnega vozlišča
vf -- začasne vrednosti ob predpostavki o neveljavnosti začetnega vozlišča
c -- vozlišča, od katerega so prišle vrednosti izraza
ct -- vozlišča, od katerega so prišle vrednosti izraza ob predpostavki o
veljavnosti začetnega vozlišča
cf -- vozlišča, od katerega so prišle vrednosti izraza ob predpostavki o
neveljavnosti začetnega vozlišča
s -- ali vrednosti otrok zagotavljajo trenutno znane vrednosti
st -- ali vrednosti otrok zagotavljajo trenutno znane začasne vrednosti
ob predpostavki o veljavnosti začetnega vozlišča
sf -- ali vrednosti otrok zagotavljajo trenutno znane začasne vrednosti
ob predpostavki o neveljavnosti začetnega vozlišča
"""
def __init__(self):
"""Konstruktor. Na abstraktnem razredu ga ne smemo klicati."""
raise Exception('Instantiating an abstract class.')
def __repr__(self):
"""Znakovna predstavitev."""
return '%s(%s,%s)' % tuple([abbrev(x, y) for (x, y) in [(self.v, self.s), (self.vt, self.st), (self.vf, self.sf)]])
def init(self):
"""Inicializacija vozlišča."""
self.a = []
self.v = [None]*self.numVariants()
self.vt = [None]*self.numVariants()
self.vf = [None]*self.numVariants()
self.c = [None]*self.numVariants()
self.ct = [None]*self.numVariants()
self.cf = [None]*self.numVariants()
self.s = [False]*self.numVariants()
self.st = [False]*self.numVariants()
self.sf = [False]*self.numVariants()
def getValue(self, p=None):
"""Vrne trajno ali začasno vrednost izraza.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
return self.v[k]
elif p:
return self.vt[k]
else:
return self.vf[k]
def setValue(self, b, c=None, p=None):
"""Nastavi trajno ali začasno vrednost izraza. Če sta začasni
vrednosti enaki, nastavi tudi trajno vrednost.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto None
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
self.v[k] = b
self.vt[k] = b
self.vf[k] = b
self.c[k] = c
elif p:
self.vt[k] = b
self.ct[k] = c
if self.vf[k] == b:
self.v[k] = b
self.c[k] = (c, self.cf[k])
else:
self.vf[k] = b
self.cf[k] = c
if self.vt[k] == b:
self.v[k] = b
self.c[k] = (self.ct[k], c)
def getSure(self, p=None):
"""Pove, ali vrednosti otrok zagotavljajo trenutno vrednost.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
return self.s[k]
elif p:
return self.st[k]
else:
return self.sf[k]
def setSure(self, p=None, trace=False):
"""Nastavi zagotovilo o trenutni vrednosti. Če obstajata zagotovili
o začasni vrednosti, nastavi zagotovilo o trajni vrednosti.
Vrne True, če je zagotovilo novo, in False, če je že obstajalo.
Argumenta:
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if p == None:
if self.s[k]:
return False
self.s[k] = True
self.st[k] = True
self.sf[k] = True
elif p:
if self.st[k]:
return False
self.st[k] = True
if self.sf[k]:
self.s[k] = True
else:
if self.sf[k]:
return False
self.sf[k] = True
if self.st[k]:
self.s[k] = True
if trace > 3:
print("Ensured at %s the value of the node %s" % (abbrev((p, k)), self))
return True
def clearTemp(self):
"""Pobriše začasne oznake."""
for i in range(self.numVariants()):
if self.v[i] == None:
self.vt[i] = None
self.vf[i] = None
self.ct[i] = None
self.cf[i] = None
self.st[i] = False
self.sf[i] = False
def numVariants(self):
"""Vrne število variant podizrazov, ki jih je treba preveriti.
Generična metoda, vrne 1."""
return 1
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Metodo kličejo nadomestne metode v dedujočih razredih. Če je vrednost
že določena, pove, ali podana vrednost ustreza določeni. V nasprotnem
primeru nastavi podano vrednost in vrne None. Tedaj sledi nadaljnja
obdelava v klicoči metodi.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
v = self.getValue(p)
if v != None:
if trace:
if v != b:
print("Error valuating to %s:%s the node %s from %s" % (abbrev(p), abbrev(b), self, c))
elif trace > 4:
print("Skipping valuation to %s:%s of the node %s" % (abbrev(p), abbrev(b), self))
return v == b
if trace > 2:
print("Valuating to %s:%s the node %s" % (abbrev(p), abbrev(b), self))
self.setValue(b, c, p)
return None
def parents(self, b, p=None, trace=False):
"""Posodobi starše po uspešni valuaciji v logično vrednost b.
Vrne True, če so vse posodobitve uspele, in False sicer.
Argumenti:
b -- nastavljena vrednost
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
for x in self.a:
if type(x) == tuple:
x, t = x
else:
t = 0
if not x.update(b, (self, k), (p, t), trace):
return False
return True
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji enega od otrok v logično vrednost b.
Generična metoda, ne spreminja stanja in vrne True.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
return True
class DAGLiteral(DAGNode):
"""Razred vozlišča v DAG, ki predstavlja logično spremenljivko.
Deduje od razreda DAGNode.
Nepodedovana spremenljivka:
p -- ime spremenljivke
"""
def __init__(self, d, p):
"""Konstruktor. Nastavi ime spremenljivke.
Argumenta:
d -- slovar podizrazov
p -- ime spremenljivke
"""
self.p = p
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
return '%s: %s' % (DAGNode.__repr__(self), self.p)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p = p[0]
self.setSure(p, trace)
return DAGNode.valuate(self, b, c, p, trace) != False and self.parents(b, p, trace)
class DAGNot(DAGNode):
"""Razred vozlišča v DAG, ki predstavlja logično negacijo.
Deduje od razreda DAGNode.
Nepodedovana spremenljivka:
t -- vozlišče, ki ustreza negiranemu izrazu
"""
def __init__(self, d, t):
"""Konstruktor. Za negirani izraz poišče ali ustvari vozlišče
ter se vanj doda kot starš.
Argumenta:
d -- slovar podizrazov
t -- negirani izraz
"""
self.t = t.node(d)
self.t.a.append(self)
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
r = str(self.t)
if len(r) > 100:
r = '...'
return "%s: ~(%s)" % (DAGNode.__repr__(self), r)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti in se
negirani izraz uspešno valuira v nasprotno vrednost.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
val = DAGNode.valuate(self, b, c, p, trace)
if val == None:
if type(p) == tuple:
p = p[0]
return self.t.valuate(not b, (self, 0), p, trace) and self.parents(b, p, trace)
else:
return val
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji otroka v logično vrednost b.
Uspe, če uspe valuacija v nasprotno vrednost od b.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p = p[0]
sure = self.t.getSure(p) and self.setSure(p, trace)
if b != None:
b = not b
val = DAGNode.valuate(self, b, c, p, trace)
if val == False:
return False
elif val:
b = None
return (b == None and not sure) or self.parents(b, p, trace)
class DAGAnd(DAGNode):
"""Razred vozlišča v DAG, ki predstavlja logično konjunkcijo.
Deduje od razreda DAGNode.
Nepodedovana spremenljivka:
l -- seznam vozlišč, ki ustrezajo konjunktom
"""
def __init__(self, d, l):
"""Konstruktor. Za vsak konjunkt poišče ali ustvari vozlišče
ter se doda kot starš dobljenemu vozlišču.
Argumenta:
d -- slovar podizrazov
l -- seznam konjuktov
"""
self.l = [x.node(d) for x in l]
for i, x in enumerate(self.l):
x.a.append((self, i))
self.init()
def __repr__(self):
"""Znakovna predstavitev."""
r = ') /\\ ('.join([str(x) for x in self.l])
if len(r) > 100:
r = '%d conjuncts' % len(self.l)
return '%s: (%s)' % (DAGNode.__repr__(self), r)
def getValue(self, p=None):
"""Vrne trajno ali začasno vrednost izraza.
Če hočemo vrednost zadnjega podizraza (dolžine 1), vrnemo vrednost zadnjega konjunkta.
Argument:
p -- začetna predpostavka, privzeto None (trajna vrednost)
"""
if type(p) == tuple and p[1] == self.numVariants():
return self.l[-1].getValue(p[0])
else:
return DAGNode.getValue(self, p)
def numVariants(self):
"""Vrne število variant podizrazov, ki jih je treba preveriti.
Vrne 1 ali število konjunktov minus 1."""
return max(1, len(self.l)-1)
def valuate(self, b, c=None, p=None, trace=False):
"""Valuacija v logično vrednost b.
Valuacija uspe, če vrednost b ne nasprotuje že znani vrednosti. Če je
b resničen, se morajo še vsi konjunkti valuirati v True. V nasprotnem
primeru preveri, ali je trenutna vrednost vsaj enega konjunkta različna
od True. Če edini tak konjunkt še nima vrednosti, ga valuira v False.
Argumenti:
b -- nastavljena vrednost
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
val = DAGNode.valuate(self, b, c, p, trace)
if val == None:
if type(p) == tuple:
p, k = p
else:
k = 0
if len(self.l) == 0:
if not b:
return False
self.setSure(p, trace)
elif len(self.l) == 1:
if not self.l[0].valuate(b, (self, k), p, trace):
return False
else:
i = k
if b:
while i < len(self.l)-1:
val = DAGNode.valuate(self, True, (self, k), (p, i+1), trace) if i < len(self.l)-2 else self.l[-1].valuate(True, (self, k), p, trace)
if val == False or not self.l[i].valuate(True, (self, k), p, trace):
return False
elif val:
break
i += 1
else:
while i < len(self.l)-1:
if self.l[i].getValue(p):
val = DAGNode.valuate(self, False, (self, k), (p, i+1), trace) if i < len(self.l)-2 else self.l[-1].valuate(False, (self, k), p, trace)
if val == False:
return False
if val:
break
else:
if (self.getValue((p, i+1)) if i < len(self.l)-2 else self.l[-1].getValue(p)) and not self.l[i].valuate(False, (self, k), p, trace):
return False
break
i += 1
if k > 0:
return self.update(b, (self, k), (p, k-1), trace)
else:
return self.parents(b, p, trace)
else:
return val
def update(self, b, c=None, p=None, trace=False):
"""Posodobi stanje po valuaciji enega od otrok v logično vrednost b.
Če je b neresničen, se poskusi valuirati v False. Če je v nasprotnem
primeru trenutna vrednost True, preveri, ali je trenutna vrednost vsaj
enega konjunkta različna od True. Če edini tak konjunkt še nima
vrednosti, ga valuira v False.
Argumenti:
b -- nastavljena vrednost otroka
c -- vozlišče, od katerega je prišla vrednost izraza, privzeto
None
p -- začetna predpostavka, privzeto None (trajna vrednost)
trace -- ali naj se izpisuje sled dokazovanja, privzeto False
"""
if type(p) == tuple:
p, k = p
else:
k = 0
if len(self.l) <= 1:
sure = True
else:
if b:
if k == len(self.l)-1:
k -= 1
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
elif (c[0] if type(c) == tuple else c) != self:
if self.getValue((p, k)) == False:
if not (self.valuate(False, c, (p, k+1), trace) if k < len(self.l)-2 else self.l[-1].valuate(False, c, p, trace)):
return False
else:
b = None
elif not (self.l[-1].getValue(p) if k == len(self.l)-2 else self.getValue((p, k+1))):
b = None
else:
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
sure = (self.l[-1].getSure(p) if k == len(self.l)-2 else self.getSure((p, k+1))) and self.l[k].getSure(p) and self.setSure((p, k), trace)
while b != None:
val = DAGNode.valuate(self, True, c, (p, k), trace)
if val == False:
return False
elif val:
b = None
k -= 1
if k < 0:
break
if self.getValue((p, k)) == False:
if not self.l[k].valuate(False, c, p, trace):
return False
else:
b = None
elif not self.l[k].getValue(p):
b = None
sure = sure and self.l[k].getSure(p) and self.setSure((p, k), trace)
else:
if k == len(self.l)-1:
k -= 1
sure = (self.l[-1].getValue(p) == False and self.l[-1].getSure(p)) if k == len(self.l)-2 else (self.getValue((p, k+1)) == False and self.getSure((p, k+1)))
sure = (sure or (self.l[k].getValue(p) == False and self.l[k].getSure(p))) and self.setSure((p, k), trace)
while b != None:
val = DAGNode.valuate(self, False, c, (p, k), trace)
if val == False:
return False
elif val:
b = None
k -= 1
if k < 0:
break
sure = (sure or (self.l[k].getValue(p) == False and self.l[k].getSure(p))) and self.setSure((p, k), trace)
while sure and k > 0:
k -= 1
sure = self.l[k].getSure(p)
if self.getValue((p, k)) == False:
sure = sure or (self.l[-1].getValue(p) if k == len(self.l)-2 else self.getValue((p, k+1))) == False
sure = sure and self.setSure((p, k), trace)
return (b == None and not sure) or self.parents(b, p, trace)
class LogicalFormula:
"""Abstraktni razred logičnih formul.
Metode:
__init__ -- konstruktor
__hash__ -- zgostitev
__repr__ -- znakovna predstavitev
__eq__ -- relacija "je enak"
__ne__ -- relacija "ni enak"
__lt__ -- relacija "je manjši"
__le__ -- relacija "je manjši ali enak"
__gt__ -- relacija "je večji"
__ge__ -- relacija "je večji ali enak"
flatten -- splošči izraz
simplify -- poenostavi izraz
cnf -- pretvori v konjunktivno normalno obliko
dnf -- pretvori v disjunktivno normalno obliko
ncf -- pretvori v obliko z negacijami in konjunkcijami
apply -- vrne izraz glede na podane vrednosti spremenljivk
node -- vrne vozlišče v DAG, ki ustreza izrazu
"""
def __init__(self):
"""Konstruktor. Na abstraktnem razredu ga ne smemo klicati."""
raise Exception('Instantiating an abstract class.')
def __hash__(self):
"""Zgostitev. Vrne zgostitev znakovne predstavitve."""
return self.__repr__().__hash__()
def __repr__(self, level=0):
"""Znakovna predstavitev.
Argument:
level -- nivo za postavljanje oklepajev, privzeto 0 (brez oklepajev)
"""
return ""
def __eq__(self, other):
"""Relacija "je enak".
Zaradi dedovanja metode __hash__ je definirana kot negacija relacije
"ni enak".
"""
return not (self != other)
def __ne__(self, other):
"""Relacija "ni enak".
Podrazredi morajo povoziti to metodo.
"""
return True
def __lt__(self, other):
"""Relacija "je manjši".
Podrazredi morajo povoziti to metodo.
"""
return True
def __le__(self, other):
"""Relacija "je manjši ali enak".
Definirana je kot negacija relacije "je večji".
"""
return not (self > other)
def __gt__(self, other):
"""Relacija "je večji".
Definirana je kot presek relacij "je večji ali enak" in "ni enak".
"""
return self >= other and self != other
def __ge__(self, other):
"""Relacija "je večji ali enak".
Definirana je kot negacija relacije "je manjši".
"""
return not (self < other)
def flatten(self):
"""Splošči izraz.
Generična metoda, vrne sebe.
"""
return self
def simplify(self):
"""Poenostavi izraz.
Generična metoda, vrne sebe.
"""
return self
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Generična metoda, vrne sebe.
"""
return self
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Generična metoda, vrne sebe.
"""
return self
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Generična metoda, vrne sebe.
"""
return self
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Generična metoda, vrne sebe.
Argument:
d -- slovar vrednosti spremenljivk
"""
return self
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Generična metoda, javi napako.
Argument:
d -- slovar vozlišč za izraze
"""
raise Exception('Not applicable in DAG.')
class Literal(LogicalFormula):
"""Logična spremenljivka.
Deduje od razreda LogicalFormula.
Spremenljivka:
p -- ime spremenljivke
"""
def __init__(self, p):
"""Konstruktor. Nastavi se ime spremenljivke, ki mora biti niz malih
črk.
Argument:
p -- ime spremenljivke
"""
if not isLiteral(p):
raise Exception('Literals must be strings of lowercase letters!')
self.p = p
def __repr__(self, level=0):
"""Znakovna predstavitev. Ta je enaka imenu spremenljivke."""
return paren(self.p, level, 6)
def __ne__(self, other):
"""Relacija "ni enak".
Spremenljivke se razlikujejo po svojem imenu.
"""
return not isinstance(other, Literal) or self.p != other.p
def __lt__(self, other):
"""Relacija "je manjši".
Spremenljivke se razvrščajo po svojem imenu in so manjše od ostalih
logičnih izrazov.
"""
if isinstance(other, Literal):
return self.p < other.p
else:
return isinstance(other, LogicalFormula)
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Nadomesti spremenljivko z vrednostjo iz slovarja, če ta obstaja.
Argument:
d -- slovar vrednosti spremenljivk
"""
if self.p in d:
if isLiteral(d[self.p]):
return Literal(d[self.p])
elif isinstance(d[self.p], bool):
return Tru() if d[self.p] else Fls()
elif isinstance(d[self.p], LogicalFormula):
return d[self.p].flatten()
return self
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGLiteral(d, self.p)
d[self] = n
return d[self]
class Not(LogicalFormula):
"""Logična negacija.
Deduje od razreda LogicalFormula.
Spremenljivka:
t -- negirani izraz
"""
def __init__(self, t):
"""Konstruktor. Nastavi se negirani izraz.
Če je t veljaven niz, se negira spremenljivka s tem imenom.
Argument:
t -- negirani izraz
"""
if isLiteral(t):
t = Literal(t)
elif not isinstance(t, LogicalFormula):
raise Exception('Only logical formulas can be negated!')
self.t = t
def __repr__(self, level=0):
"""Znakovna predstavitev. Negacija se označi z znakom ~."""
return paren('~'+self.t.__repr__(6), level, 6)
def __ne__(self, other):
"""Relacija "ni enak".
Negacije se ločijo po negiranem izrazu.
"""
return not isinstance(other, Not) or self.t != other.t
def __lt__(self, other):
"""Relacija "je manjši".
Negacije se razvrščajo po negiranem izrazu in so manjše od ostalih
logičnih izrazov, razen spremenljivk.
"""
if isinstance(other, Not):
return self.t < other.t
else:
return isinstance(other, LogicalFormula) and not isinstance(other, Literal)
def flatten(self):
"""Splošči izraz.
Izniči dvojne negacije in splošči podizraze."""
if isinstance(self.t, Not):
return self.t.t.flatten()
elif isinstance(self.t, And):
return Or([Not(x) for x in self.t.l]).flatten()
elif isinstance(self.t, Or):
return And([Not(x) for x in self.t.l]).flatten()
else:
return self
def simplify(self):
"""Poenostavi izraz.
Izniči dvojno negacijo ter porine negacijo v konjunkcijo ali
disjunkcijo po de Morganovih zakonih.
"""
if isinstance(self.t, Not):
return self.t.t.simplify()
elif isinstance(self.t, And):
return Or([Not(x) for x in self.t.l]).simplify()
elif isinstance(self.t, Or):
return And([Not(x) for x in self.t.l]).simplify()
else:
return self
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Izniči dvojno negacijo ter porine negacijo v disjunkcijo po
de Morganovih zakonih.
"""
if isinstance(self.t, Not):
return self.t.t.ncf()
elif isinstance(self.t, Or):
return And([Not(x).ncf() for x in self.t.l])
else:
return Not(self.t.ncf())
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na negiranem izrazu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return Not(self.t.apply(d)).flatten()
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGNot(d, self.t)
d[self] = n
return d[self]
class And(LogicalFormula):
"""Logična konjunkcija.
Deduje od razreda LogicalFormula.
Spremenljivka:
l -- seznam konjunktov
"""
def __init__(self, *l):
"""Konstruktor. Nastavijo se konjunkti.
Konjunkti so lahko podani kot argumenti, kot seznam ali kot
logična konjunkcija. Če je kateri od konjunktov veljaven niz, se
uporabi spremenljivka s tem imenom.
Argumenti:
*l -- konjunkti
"""
self.l = None
if len(l) == 1:
if isinstance(l[0], Or):
self.l = l[0].l
elif isLiteral(l[0]):
self.l = [Literal(l[0])]
elif isinstance(l[0], list) or isinstance(l[0], tuple):
l = list(l[0])
if self.l == None:
l = [Literal(x) if isLiteral(x) else x for x in l]
if any([not isinstance(x, LogicalFormula) for x in l]):
raise Exception('Only logical formulas can be conjoined!')
self.l = l[:]
def __repr__(self, level=0):
"""Znakovna predstavitev. Konjunkti so ločeni z znakoma /\. Prazna
konjunkcija je logična resnica in se označi z znakom T."""
if len(self.l) == 0:
return paren('T', level, 6)
elif len(self.l) == 1:
return self.l[0].__repr__(level)
else:
return paren(' /\\ '.join([x.__repr__(6) for x in self.l]), level, 5)
def __ne__(self, other):
"""Relacija "ni enak".
Konjukcije se ločijo po seznamu konjunktov.
"""
return not isinstance(other, And) or self.l != other.l
def __lt__(self, other):
"""Relacija "je manjši".
Konjukcije se razvrščajo po seznamu konjunktov in so manjše od
disjunkcij.
"""
if isinstance(other, And):
return self.l < other.l
else:
return isinstance(other, LogicalFormula) and not isinstance(other, Literal) and not isinstance(other, Not)
def flatten(self):
"""Splošči izraz."""
if len(self.l) == 1:
return self.l[0].flatten()
else:
l = sum([y.l if isinstance(y, And) else [y] for y in [x.flatten() for x in self.l]], [])
if any([isinstance(x, Or) and len(x.l) == 0 for x in l]):
return Fls()
elif len(l) == 1:
return l[0]
else:
return And(l)
def simplify(self):
"""Poenostavi izraz.
Najprej splošči gnezdene konjunkcije med poenostavljenimi konjunkti.
Če je konjunkt natanko eden, ga vrne, sicer pa poenostavi disjunkcije
med konjunkti po pravilih absorpcije. Če je po teh poenostavitvah
kateri od konjunktov prazna disjunkcija (tj. logična neresnica) ali se
kateri od konjunktov pojavi še v negirani obliki, potem vrne logično
neresnico. V nasprotnem primeru se konjunkti uredijo po določenem
vrstnem redu.
"""
l = sum([y.l if isinstance(y, And) else [y] for y in [x.simplify() for x in self.l]], [])
if len(l) == 1:
return l[0]
else:
l = set(l)
l.difference_update([x for x in l if isinstance(x, Or) and any([y in x.l for y in l])])
assorb = [(x, [y.t for y in l if isinstance(y, Not) and y.t in x.l] + [Not(y) for y in l if Not(y) in x.l]) for x in l if isinstance(x, Or)]
remove = [x[0] for x in assorb if len(x[1]) > 0]
add = [Or([y for y in x[0].l if y not in x[1]]).simplify() for x in assorb if len(x[1]) > 0]
l.difference_update(remove)
l.update(add)
if len(l) == 1:
return l.pop()
if any([isinstance(x, Or) and len(x.l) == 0 for x in l]) or any([x.t in l for x in l if isinstance(x, Not)]):
return Fls()
return And(sortSet(l))
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Vse konjunkte pretvori v konjunktivno normalno obliko.
"""
return And([x.cnf() for x in self.l]).flatten()
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Če je število konjunktov 0 ali 1, vrne sebe oziroma edinega konjunkta v
disjunktivni normalni obliki. Sicer pretvori vse konjunkte v
disjunktivno normalno obliko, nato pa po pravilih za distributivnost
naredi disjunkcijo več konjunktov.
"""
if len(self.l) == 0:
return self
elif len(self.l) == 1:
return self.l[0].dnf()
l = [x.dnf() for x in self.flatten().l]
a = [x for x in l if not isinstance(x, Or)]
d = [x for x in l if isinstance(x, Or)]
if len(d) == 0:
return And(a)
else:
return Or([And(a + [x] + d[1:]).dnf() for x in d[0].l]).flatten()
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Vse konjunkte pretvori v obliko z negacijami in konjunkcijami.
"""
return And([x.ncf() for x in self.l])
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na vsakem konjunktu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return And([x.apply(d) for x in self.l]).flatten()
def node(self, d):
"""Vrne vozlišče v DAG, ki ustreza izrazu.
Če izraza še ni v slovarju d, naredi novo vozlišče in ga doda v slovar.
Argument:
d -- slovar vozlišč za izraze
"""
if self not in d:
n = DAGAnd(d, self.l)
d[self] = n
return d[self]
class Or(LogicalFormula):
"""Logična disjunkcija.
Deduje od razreda LogicalFormula.
Spremenljivka:
l -- seznam disjunktov
"""
def __init__(self, *l):
"""Konstruktor. Nastavijo se disjunkti.
Disjunkti so lahko podani kot argumenti, kot seznam ali kot
logična disjunkcija. Če je kateri od disjunktov veljaven niz, se
uporabi spremenljivka s tem imenom.
Argumenti:
*l -- disjunkti
"""
self.l = None
if len(l) == 1:
if isinstance(l[0], Or):
self.l = l[0].l
elif isLiteral(l[0]):
self.l = [Literal(l[0])]
elif isinstance(l[0], list) or isinstance(l[0], tuple):
l = list(l[0])
if self.l == None:
l = [Literal(x) if isLiteral(x) else x for x in l]
if any([not isinstance(x, LogicalFormula) for x in l]):
raise Exception('Only logical formulas can be disjoined!')
self.l = l[:]
def __repr__(self, level=0):
"""Znakovna predstavitev. Disjunkti so ločeni z znakoma \/. Prazna
disjunkcija je logična neresnica in se označi z znakom F."""
if len(self.l) == 0:
return paren('F', level, 6)
elif len(self.l) == 1:
return self.l[0].__repr__(level)
else:
return paren(' \\/ '.join([x.__repr__(5) for x in self.l]), level, 4)
def __ne__(self, other):
"""Relacija "ni enak".
Disjukcije se ločijo po seznamu disjunktov.
"""
return not isinstance(other, Or) or self.l != other.l
def __lt__(self, other):
"""Relacija "je manjši".
Disjukcije se razvrščajo po seznamu konjunktov in so večje od ostalih
logičnih izrazov.
"""
return isinstance(other, Or) and self.l < other.l
def flatten(self):
"""Splošči izraz."""
if len(self.l) == 1:
return self.l[0].flatten()
else:
l = sum([y.l if isinstance(y, Or) else [y] for y in [x.flatten() for x in self.l]], [])
if any([isinstance(x, And) and len(x.l) == 0 for x in l]):
return Tru()
elif len(l) == 1:
return l[0]
else:
return Or(l)
def simplify(self):
"""Poenostavi izraz.
Najprej splošči gnezdene disjunkcije med poenostavljenimi disjunkti.
Če je disjunkt natanko eden, ga vrne, sicer pa poenostavi konjunkcije
med disjunkti po pravilih absorpcije. Če je po teh poenostavitvah
kateri od disjunktov prazna konjunkcija (tj. logična resnica) ali se
kateri od disjunktov pojavi še v negirani obliki, potem vrne logično
resnico. V nasprotnem primeru se disjunkti uredijo po določenem
vrstnem redu.
"""
l = sum([y.l if isinstance(y, Or) else [y] for y in [x.simplify() for x in self.l]], [])
if len(l) == 1:
return l[0]
else:
l = set(l)
l.difference_update([x for x in l if isinstance(x, And) and any([y in x.l for y in l])])
assorb = [(x, [y.t for y in l if isinstance(y, Not) and y.t in x.l] + [Not(y) for y in l if Not(y) in x.l]) for x in l if isinstance(x, And)]
remove = [x[0] for x in assorb if len(x[1]) > 0]
add = [And([y for y in x[0].l if y not in x[1]]).simplify() for x in assorb if len(x[1]) > 0]
l.difference_update(remove)
l.update(add)
if len(l) == 1:
return l.pop()
if any([isinstance(x, And) and len(x.l) == 0 for x in l]) or any([x.t in l for x in l if isinstance(x, Not)]):
return Tru()
else:
return Or(sortSet(l))
def cnf(self):
"""Pretvori v konjunktivno normalno obliko.
Če je število disjunktov 0 ali 1, vrne sebe oziroma edinega disjunkta v
konjunktivni normalni obliki. Sicer pretvori vse disjunkte v
konjunktivno normalno obliko, nato pa po pravilih za distributivnost
naredi konjunkcijo več disjunktov.
"""
if len(self.l) == 0:
return self
elif len(self.l) == 1:
return self.l[0].cnf()
l = [x.cnf() for x in self.flatten().l]
a = [x for x in l if not isinstance(x, And)]
d = [x for x in l if isinstance(x, And)]
if len(d) == 0:
return Or(a)
else:
return And([Or(a + [x] + d[1:]).cnf() for x in d[0].l]).flatten()
def dnf(self):
"""Pretvori v disjunktivno normalno obliko.
Vse disjunkte pretvori v disjunktivno normalno obliko.
"""
return Or([x.dnf() for x in self.l]).flatten()
def ncf(self):
"""Pretvori v obliko z negacijami in konjunkcijami.
Negacije vseh disjunktov pretvori v obliko z negacijami in
konjunkcijami ter vrne njihovo negirano konjunkcijo.
"""
return Not(And([Not(x).ncf() for x in self.l]))
def apply(self, d):
"""Vrne izraz glede na podane vrednosti spremenljivk.
Aplikacijo naredi na vsakem disjunktu, nato pa izvede poenostavitev.
Argument:
d -- slovar vrednosti spremenljivk
"""
return Or([x.apply(d) for x in self.l]).flatten()
class Implies(Or):
"""Logična implikacija, predstavljena kot disjunkcija konsekvensa z
negacijo precedensa.
Deduje od razreda Or.
"""
def __init__(self, prec, cons):
"""Konstruktor. Nastavita se disjunkta.
Argumenta:
prec -- precedens
cons -- konsekvens
"""
if isLiteral(prec):
prec = Literal(prec)
if isLiteral(cons):
cons = Literal(cons)
if not isinstance(prec, LogicalFormula) or not isinstance(cons, LogicalFormula):
raise Exception('Only logical formulas can be imply or be implied!')
self.l = [Not(prec), cons]
def __repr__(self, level=0):
"""Znakovna predstavitev. Precedens in konsekvens sta ločena z znakoma
=>."""
if len(self.l) == 2 and isinstance(self.l[0], Not):
return paren(self.l[0].t.__repr__(2) + ' => ' + self.l[1].__repr__(1), level, 1)
else:
return Or.__repr__(self, level)
class Tru(And):
"""Logična resnica, predstavljena kot prazna konjunkcija.
Deduje od razreda And.
"""
def __init__(self):
"""Konstruktor. Nastavi se prazen seznam konjunktov."""
self.l = []
class Fls(Or):
"""Logična neresnica, predstavljena kot prazna disjunkcija.
Deduje od razreda Or.
"""
def __init__(self):
"""Konstruktor. Nastavi se prazen seznam disjunktov."""
self.l = []
| bsd-3-clause | 7,319,391,701,429,646,000 | 33.434596 | 252 | 0.555154 | false |
LuciferJack/python-mysql-pool | docs/source/conf.py | 1 | 5134 | # -*- coding: utf-8 -*-
#
# PyMysqlPool documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 23 16:56:20 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMysqlPool'
copyright = u'2017, Lucifer Jack'
author = u'Lucifer Jack'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyMysqlPooldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyMysqlPool.tex', u'PyMysqlPool Documentation',
u'Lucifer Jack', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pymysqlpool', u'PyMysqlPool Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyMysqlPool', u'PyMysqlPool Documentation',
author, 'PyMysqlPool', 'One line description of project.',
'Miscellaneous'),
]
| mit | 8,338,235,457,317,606,000 | 30.496933 | 79 | 0.675107 | false |
warnes/irrigatorpro | irrigator_pro/farms/signals.py | 1 | 11745 | from django.dispatch import receiver
from django.db.models.signals import *
from farms.models import *
from irrigator_pro.settings import DEBUG
def minNone( *args ):
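    """Return the smallest non-None value among *args, or None if every value is None."""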
args = filter( lambda x: x is not None, args)
if args:
return min(args)
else:
return None
## From
## http://stackoverflow.com/questions/15624817/have-loaddata-ignore-or-disable-post-save-signals
from functools import wraps
def disable_for_loaddata(signal_handler):
"""
Decorator that turns off signal handlers when loading fixture data.
"""
@wraps(signal_handler)
def wrapper(*args, **kwargs):
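        # Django passes raw=True when saving objects from fixtures (loaddata); skip the handler then.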
if kwargs.get('raw', None):
return
signal_handler(*args, **kwargs)
return wrapper
## These signal handlers record the (earliest) relevant date of any
## created/changed/deleted object upon which the calculation of
## WaterRegister entries depends.
@receiver(pre_save, sender=WaterHistory)
@receiver(pre_delete, sender=WaterHistory)
@disable_for_loaddata
def handler_WaterHistory(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = WaterHistory.objects.get(pk=instance.id)
old_field=old_instance.field
new_field=new_instance.field
old_field.earliest_changed_dependency_date = minNone(old_field.earliest_changed_dependency_date,
old_instance.datetime.date()
)
new_field.earliest_changed_dependency_date = minNone(new_field.earliest_changed_dependency_date,
new_instance.datetime.date()
)
old_field.save()
new_field.save()
else:
try:
field = instance.field
if instance.datetime:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
instance.datetime.date()
)
field.save()
except ValueError:
pass
@receiver(pre_save, sender=ProbeReading)
@receiver(pre_delete, sender=ProbeReading)
@disable_for_loaddata
def handler_ProbeReading(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = ProbeReading.objects.get(pk=instance.id)
old_radio_id = old_instance.radio_id
old_reading_date = old_instance.datetime.date()
old_probes = Probe.objects.filter(radio_id=old_radio_id,
crop_season__season_start_date__lte=old_reading_date,
crop_season__season_end_date__gte=old_reading_date)
for old_probe in old_probes:
field=old_probe.field
new_date = minNone(field.earliest_changed_dependency_date,
old_instance.datetime.date() )
field.earliest_changed_dependency_date = new_date
#if DEBUG: print "Field %s: %s --> %s " % (field, field.earliest_changed_dependency_date, new_date)
field.save()
this_radio_id = instance.radio_id
this_reading_date = instance.datetime.date()
new_probes = Probe.objects.filter(radio_id=this_radio_id,
crop_season__season_start_date__lte=this_reading_date,
crop_season__season_end_date__gte=this_reading_date)
for new_probe in new_probes:
field=new_probe.field
new_date = minNone(field.earliest_changed_dependency_date,
instance.datetime.date() )
field.earliest_changed_dependency_date = new_date
#if DEBUG: print "Field %s: %s --> %s " % (field, field.earliest_changed_dependency_date, new_date)
field.save()
@receiver(pre_save, sender=CropSeasonEvent)
@receiver(pre_delete, sender=CropSeasonEvent)
@disable_for_loaddata
def handler_CropSeasonEvent(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = CropSeasonEvent.objects.get(pk=instance.id)
old_field = old_instance.field
dep_mdate = minNone(old_field.earliest_changed_dependency_date, old_instance.date)
        old_field.earliest_changed_dependency_date = dep_mdate
old_field.save()
field = instance.field
dep_mdate = minNone(field.earliest_changed_dependency_date, instance.date)
field.earliest_changed_dependency_date = dep_mdate
field.save()
@receiver(pre_save, sender=CropSeason)
@receiver(pre_delete, sender=CropSeason)
@disable_for_loaddata
def handler_CropSeason(sender, instance, **kwargs):
if instance.id: # save changes to existing object
new_instance = instance
old_instance = CropSeason.objects.get(pk=instance.id)
old_date = None
new_date = None
if old_instance.season_start_date != new_instance.season_start_date:
old_date = minNone(old_date, old_instance.season_start_date)
new_date = minNone(new_date, new_instance.season_start_date)
if old_instance.season_end_date != new_instance.season_end_date:
old_date = minNone(old_date, old_instance.season_end_date)
new_date = minNone(new_date, new_instance.season_end_date)
if old_instance.crop != new_instance.crop:
old_date = old_instance.season_start_date
new_date = new_instance.season_start_date
if old_date is not None:
for field in old_instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_date)
field.save()
if new_date is not None:
for field in new_instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_date)
field.save()
removed_fields = set( old_instance.field_list.all() ) - \
set( new_instance.field_list.all() )
added_fields = set( new_instance.field_list.all() ) - \
set( old_instance.field_list.all() )
for field in removed_fields:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_instance.season_start_date)
field.save()
for field in added_fields:
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_instance.season_start_date)
field.save()
else:
pass
@receiver(post_save, sender=CropSeason)
@disable_for_loaddata
def handler_CropSeason_postsave(sender, instance, created, **kwargs):
if created == True:
for field in instance.field_list.all():
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
instance.season_start_date)
field.save()
@receiver(pre_save, sender=Probe)
@receiver(pre_delete, sender=Probe)
@disable_for_loaddata
def handler_Probe(sender, instance, **kwargs):
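    """Flag fields tied to a changed Probe with the earliest affected probe-reading date."""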
if instance.id: # save changes to existing object
new_instance = instance
old_instance = Probe.objects.get(pk=instance.id)
old_radio_id = old_instance.radio_id
old_season_start_date = old_instance.crop_season.season_start_date
old_season_end_date = old_instance.crop_season.season_end_date
old_probereadings = ProbeReading.objects.filter(radio_id=old_radio_id,
datetime__range=(old_season_start_date,
old_season_end_date)
)
if old_probereadings:
old_earliest_probereading_date = old_probereadings.earliest('datetime').datetime.date();
else:
old_earliest_probereading_date = None;
new_radio_id = new_instance.radio_id
new_season_start_date = new_instance.crop_season.season_start_date
new_season_end_date = new_instance.crop_season.season_end_date
new_probereadings = ProbeReading.objects.filter(radio_id=new_radio_id,
datetime__range=(new_season_start_date,
new_season_end_date)
)
if new_probereadings:
new_earliest_probereading_date = new_probereadings.earliest('datetime').datetime.date();
else:
new_earliest_probereading_date = None;
if old_radio_id != new_radio_id: # changed radioid
if old_instance.id and old_instance.field:
field=old_instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
old_earliest_probereading_date)
field.save()
if new_instance.id and new_instance.field:
field=new_instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
new_earliest_probereading_date)
field.save()
old_field = old_instance.field
new_field = new_instance.field
if old_field:
old_field.earliest_changed_dependency_date = minNone(old_field.earliest_changed_dependency_date,
old_earliest_probereading_date)
old_field.save()
if new_field:
new_field.earliest_changed_dependency_date = minNone(new_field.earliest_changed_dependency_date,
new_earliest_probereading_date)
new_field.save()
else: # new object or delete object
radio_id = instance.radio_id
season_start_date = instance.crop_season.season_start_date
season_end_date = instance.crop_season.season_end_date
probereadings = ProbeReading.objects.filter(radio_id=radio_id,
datetime__range=(season_start_date,
season_end_date)
)
if probereadings:
earliest_probereading_date = probereadings.earliest('datetime').datetime.date();
else:
earliest_probereading_date = None;
if instance.id and instance.field:
field=instance.field
field.earliest_changed_dependency_date = minNone(field.earliest_changed_dependency_date,
earliest_probereading_date)
field.save()
| mit | -8,849,396,207,397,116,000 | 42.66171 | 111 | 0.558791 | false |
alex/sentry | tests/sentry/web/frontend/tests.py | 1 | 15481 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import json
from django.conf import settings as django_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from sentry.conf import settings
from sentry.models import Group, Project, TeamMember, \
MEMBER_OWNER, MEMBER_USER, Team
from sentry.web.helpers import get_login_url
from tests.base import TestCase
logger = logging.getLogger(__name__)
class SentryViewsTest(TestCase):
fixtures = ['tests/fixtures/views.json']
def setUp(self):
self.user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
self.user.set_password('admin')
self.user.save()
def test_auth(self):
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/login.html')
resp = self.client.post(reverse('sentry-login'), {
'username': 'admin',
'password': 'admin',
}, follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'sentry/login.html')
def test_dashboard(self):
# no projects redirects them to create new project
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/projects/new.html')
# requires at least one project to show dashboard
Project.objects.create(name='foo', owner=self.user)
Project.objects.create(name='bar', owner=self.user).team
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/dashboard.html')
# no projects and unauthenticated
self.client.logout()
Project.objects.all().delete()
resp = self.client.get(reverse('sentry'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/login.html')
def test_index(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry', kwargs={'project_id': 1}) + '?sort=freq', follow=False)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
def test_group_details(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_event_list(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-events', kwargs={'project_id': 1, 'group_id': 2}), follow=False)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_message_details(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-event', kwargs={'project_id': 1, 'group_id': 2, 'event_id': 4}), follow=True)
self.assertEquals(resp.status_code, 200, resp.content)
self.assertTemplateUsed(resp, 'sentry/groups/event.html')
self.assertTrue('group' in resp.context)
group = Group.objects.get(pk=2)
self.assertEquals(resp.context['group'], group)
def test_group_json_multi(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)[0]['level'], 'error')
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': 1})
self.assertEquals(resp.status_code, 200)
resp = self.client.get(reverse('sentry-group-events-json', kwargs={'project_id': 1, 'group_id': 2}), {'limit': settings.MAX_JSON_RESULTS+1})
self.assertEquals(resp.status_code, 400)
def test_group_events_details_json(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-group-event-json', kwargs={'project_id': 1, 'group_id': 2, 'event_id_or_latest': 'latest'}))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
self.assertEquals(json.loads(resp.content)['level'], 'error')
def test_status_env(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/env.html')
def test_status_packages(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-packages-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/packages.html')
def test_status_queue(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-queue-status'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/queue.html')
def test_stats(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-stats'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/stats.html')
def test_manage_users(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-admin-users'), follow=True)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/users/list.html')
def test_event_list(self):
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-events', kwargs={'project_id': 1}))
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/events/event_list.html')
def test_replay_event(self):
# bad event_id
self.client.login(username='admin', password='admin')
resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 1}))
self.assertEquals(resp.status_code, 302)
# valid params
# self.client.login(username='admin', password='admin')
# resp = self.client.get(reverse('sentry-replay', kwargs={'project_id': 1, 'event_id': 4}))
# self.assertEquals(resp.status_code, 200)
# self.assertTemplateUsed(resp, 'sentry/events/replay.html')
class ViewPermissionTest(TestCase):
"""
These tests simply ensure permission requirements for various views.
"""
fixtures = ['tests/fixtures/views.json']
def setUp(self):
self.user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
self.user.set_password('admin')
self.user.save()
self.user2 = User(username="member", email="member@localhost")
self.user2.set_password('member')
self.user2.save()
self.user3 = User(username="nobody", email="nobody@localhost")
self.user3.set_password('nobody')
self.user3.save()
self.user4 = User(username="owner", email="owner@localhost")
self.user4.set_password('owner')
self.user4.save()
self.team = Team.objects.create(owner=self.user4, name='foo')
self.project = Project.objects.get(id=1)
self.project.update(public=False, team=self.team)
self.tm = TeamMember.objects.get_or_create(
user=self.user2,
team=self.team,
type=MEMBER_USER,
)[0]
TeamMember.objects.get_or_create(
user=self.user4,
team=self.team,
type=MEMBER_OWNER,
)[0]
def _assertPerm(self, path, template, account=None, want=True):
"""
Requests ``path`` and asserts that ``template`` is
rendered for ``account`` (Anonymous if None) given ``want``
is Trueish.
"""
if account:
self.assertTrue(self.client.login(username=account, password=account))
else:
self.client.logout()
resp = self.client.get(path)
if want:
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, template)
else:
self.assertEquals(resp.status_code, 302)
self.assertTemplateNotUsed(resp, template)
def test_project_list(self):
path = reverse('sentry-project-list')
template = 'sentry/projects/list.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody')
self._assertPerm(path, template, None, False)
def test_new_project(self):
path = reverse('sentry-new-project')
template = 'sentry/projects/new.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, None, False)
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'nobody')
self._assertPerm(path, template, None, False)
def test_manage_project(self):
path = reverse('sentry-manage-project', kwargs={'project_id': 1})
template = 'sentry/projects/manage.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_remove_project(self):
        # We can't delete the default project
with self.Settings(SENTRY_PROJECT=2):
path = reverse('sentry-remove-project', kwargs={'project_id': 1})
template = 'sentry/projects/remove.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_new_team_member(self):
path = reverse('sentry-new-team-member', kwargs={'team_slug': self.team.slug})
template = 'sentry/teams/members/new.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_edit_team_member(self):
path = reverse('sentry-edit-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
template = 'sentry/teams/members/edit.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
def test_remove_team_member(self):
path = reverse('sentry-remove-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
template = 'sentry/teams/members/remove.html'
self._assertPerm(path, template, 'admin')
self._assertPerm(path, template, 'owner')
self._assertPerm(path, template, None, False)
self._assertPerm(path, template, 'nobody', False)
self._assertPerm(path, template, 'member', False)
class SentrySearchTest(TestCase):
def test_checksum_query(self):
checksum = 'a' * 32
g = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver%s' % (g.get_absolute_url(),))
def test_dupe_checksum(self):
checksum = 'a' * 32
g1 = Group.objects.create(
project_id=1,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
g2 = Group.objects.create(
project_id=1,
logger='root',
culprit='b',
checksum=checksum,
message='hi',
)
with self.Settings(SENTRY_PUBLIC=True, SENTRY_USE_SEARCH=False):
response = self.client.get(reverse('sentry-search', kwargs={'project_id': 1}), {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'sentry/search.html')
context = response.context
self.assertTrue('event_list' in context)
self.assertEquals(len(context['event_list']), 2)
self.assertTrue(g1 in context['event_list'])
self.assertTrue(g2 in context['event_list'])
class SentryHelpersTest(TestCase):
def test_get_db_engine(self):
from sentry.utils.db import get_db_engine
_databases = getattr(django_settings, 'DATABASES', {}).copy()
django_settings.DATABASES['default'] = {'ENGINE': 'blah.sqlite3'}
self.assertEquals(get_db_engine(), 'sqlite3')
django_settings.DATABASES['default'] = {'ENGINE': 'blah.mysql'}
self.assertEquals(get_db_engine(), 'mysql')
django_settings.DATABASES = _databases
def test_get_login_url(self):
with self.Settings(LOGIN_URL='/really-a-404'):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-login'))
with self.Settings(LOGIN_URL=reverse('sentry-fake-login')):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-fake-login'))
# should still be cached
with self.Settings(LOGIN_URL='/really-a-404'):
url = get_login_url(False)
self.assertEquals(url, reverse('sentry-fake-login'))
with self.Settings(SENTRY_LOGIN_URL=None):
url = get_login_url(True)
self.assertEquals(url, reverse('sentry-login'))
| bsd-3-clause | -5,759,128,192,834,405,000 | 41.647383 | 148 | 0.63284 | false |
ocefpaf/pycsw | pycsw/plugins/outputschemas/__init__.py | 1 | 1360 | # -*- coding: ISO-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
__all__ = ['atom', 'dif', 'fgdc', 'gm03']
| mit | 4,767,057,192,456,334,000 | 42.870968 | 67 | 0.677206 | false |
Infinidat/gitpy | tests/test_deleting.py | 1 | 1217 | import os
from test_commits import CommittedRepositoryTest
class DeletionTest(CommittedRepositoryTest):
def testDeletingFile(self):
filename = self.makeSomeChange()
full_filename = os.path.join(self.repo.path, filename)
self.repo.addAll()
self.repo.commit(message="test")
self.assertTrue(os.path.exists(full_filename))
self.repo.delete(filename)
self.assertFalse(os.path.exists(full_filename))
self.repo.resetHard()
self.assertTrue(os.path.exists(full_filename))
self.repo.delete(filename)
self.repo.commit(message="test")
self.assertFalse(os.path.exists(full_filename))
self.repo.resetHard()
self.assertFalse(os.path.exists(full_filename))
def testGetDeletedFiles(self):
filename = self.makeSomeChange()
full_filename = os.path.join(self.repo.path, filename)
self.repo.addAll()
self.repo.commit(message="test")
self.assertTrue(os.path.exists(full_filename))
os.unlink(full_filename)
deleted_files = self.repo.getDeletedFiles()
self.assertEquals(len(deleted_files), 1)
self.assertEquals(deleted_files[0].filename, filename)
| bsd-3-clause | 4,320,244,271,205,427,700 | 40.965517 | 62 | 0.67461 | false |
michaelmior/django-messages | django_messages/tests.py | 1 | 7287 | import datetime
from django.test import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django_messages.models import Message
from django_messages.utils import format_subject, format_quote
class SendTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('user1', '[email protected]', '123456')
self.user2 = User.objects.create_user('user2', '[email protected]', '123456')
self.msg1 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text', body='Body Text')
self.msg1.save()
def testBasic(self):
self.assertEquals(self.msg1.sender, self.user1)
self.assertEquals(self.msg1.recipient, self.user2)
self.assertEquals(self.msg1.subject, 'Subject Text')
self.assertEquals(self.msg1.body, 'Body Text')
self.assertEquals(self.user1.sent_messages.count(), 1)
self.assertEquals(self.user1.received_messages.count(), 0)
self.assertEquals(self.user2.received_messages.count(), 1)
self.assertEquals(self.user2.sent_messages.count(), 0)
class DeleteTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user('user3', '[email protected]', '123456')
self.user2 = User.objects.create_user('user4', '[email protected]', '123456')
self.msg1 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text 1', body='Body Text 1')
self.msg2 = Message(sender=self.user1, recipient=self.user2, subject='Subject Text 2', body='Body Text 2')
self.msg1.sender_deleted_at = datetime.datetime.now()
self.msg2.recipient_deleted_at = datetime.datetime.now()
self.msg1.save()
self.msg2.save()
def testBasic(self):
self.assertEquals(Message.objects.outbox_for(self.user1).count(), 1)
self.assertEquals(Message.objects.outbox_for(self.user1)[0].subject, 'Subject Text 2')
self.assertEquals(Message.objects.inbox_for(self.user2).count(),1)
self.assertEquals(Message.objects.inbox_for(self.user2)[0].subject, 'Subject Text 1')
#undelete
self.msg1.sender_deleted_at = None
self.msg2.recipient_deleted_at = None
self.msg1.save()
self.msg2.save()
self.assertEquals(Message.objects.outbox_for(self.user1).count(), 2)
self.assertEquals(Message.objects.inbox_for(self.user2).count(),2)
class IntegrationTestCase(TestCase):
"""
    Test the app from a user perspective using Django's Test-Client.
"""
T_USER_DATA = [{'username': 'user_1', 'password': '123456',
'email': '[email protected]'},
{'username': 'user_2', 'password': '123456',
'email': '[email protected]'},]
T_MESSAGE_DATA = [{'subject': 'Test Subject 1',
'body': 'Lorem ipsum\ndolor sit amet\n\nconsectur.'}]
def setUp(self):
""" create 2 users and a test-client logged in as user_1 """
self.user_1 = User.objects.create_user(**self.T_USER_DATA[0])
self.user_2 = User.objects.create_user(**self.T_USER_DATA[1])
self.c = Client()
self.c.login(username=self.T_USER_DATA[0]['username'],
password=self.T_USER_DATA[0]['password'])
def testInboxEmpty(self):
""" request the empty inbox """
response = self.c.get(reverse('messages_inbox'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/inbox.html')
self.assertEquals(len(response.context['message_list']), 0)
def testOutboxEmpty(self):
""" request the empty outbox """
response = self.c.get(reverse('messages_outbox'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/outbox.html')
self.assertEquals(len(response.context['message_list']), 0)
def testTrashEmpty(self):
""" request the empty trash """
response = self.c.get(reverse('messages_trash'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/trash.html')
self.assertEquals(len(response.context['message_list']), 0)
def testCompose(self):
""" compose a message step by step """
response = self.c.get(reverse('messages_compose'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/compose.html')
response = self.c.post(reverse('messages_compose'),
{'recipient': self.T_USER_DATA[1]['username'],
'subject': self.T_MESSAGE_DATA[0]['subject'],
'body': self.T_MESSAGE_DATA[0]['body']})
        # successful sending should redirect to inbox
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], "http://testserver%s"%reverse('messages_inbox'))
# make sure the message exists in the outbox after sending
response = self.c.get(reverse('messages_outbox'))
self.assertEquals(len(response.context['message_list']), 1)
def testReply(self):
""" test that user_2 can reply """
# create a message for this test
Message.objects.create(sender=self.user_1,
recipient=self.user_2,
subject=self.T_MESSAGE_DATA[0]['subject'],
body=self.T_MESSAGE_DATA[0]['body'])
# log the user_2 in and check the inbox
self.c.login(username=self.T_USER_DATA[1]['username'],
password=self.T_USER_DATA[1]['password'])
response = self.c.get(reverse('messages_inbox'))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/inbox.html')
self.assertEquals(len(response.context['message_list']), 1)
pk = getattr(response.context['message_list'][0], 'pk')
# reply to the first message
response = self.c.get(reverse('messages_reply',
kwargs={'message_id':pk}))
self.assertEquals(response.status_code, 200)
self.assertEquals(response.templates[0].name, 'django_messages/compose.html')
self.assertEquals(response.context['form'].initial['body'],
format_quote(self.user_1, self.T_MESSAGE_DATA[0]['body']))
self.assertEqual(response.context['form'].initial['subject'],
u"Re: %(subject)s"%{'subject': self.T_MESSAGE_DATA[0]['subject']})
class FormatTestCase(TestCase):
""" some tests for helper functions """
def testSubject(self):
""" test that reply counting works as expected """
self.assertEquals(format_subject(u"foo bar"), u"Re: foo bar")
self.assertEquals(format_subject(u"Re: foo bar"), u"Re[2]: foo bar")
self.assertEquals(format_subject(u"Re[2]: foo bar"), u"Re[3]: foo bar")
self.assertEquals(format_subject(u"Re[10]: foo bar"), u"Re[11]: foo bar")
| bsd-3-clause | 6,411,869,984,826,800,000 | 49.262069 | 114 | 0.62783 | false |
BrewCenter/BrewCenterAPI | brewcenter_api/brew_data/data_miner/brew_target/yeast.py | 1 | 3040 | from brew_data.data_miner.brew_target.utils import clean
class Yeast:
def __init__(self, data):
self.name = data[0]
self.type = data[1]
self.form = data[2]
self.lab = data[3]
self.min_temp = data[4]
self.max_temp = data[5]
self.flocculation = data[6]
self.attenuation = data[7]
self.notes = clean(data[8])
self.transform()
def transform(self):
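        # Wrap the text fields in double quotes so they can be interpolated directly
        # into the INSERT statement built in get_yeast().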
self.name = '"' + self.name + '"'
self.type = '"' + self.type + '"'
self.lab = '"' + self.lab + '"'
self.flocculation = '"' + self.flocculation + '"'
# convert "None" notes to empty
if self.notes is None:
self.notes = '""'
else:
self.notes = '"' + self.notes + '"'
self.is_liquid = 0
if self.form == "Liquid":
self.is_liquid = 1
    @staticmethod
    def get_keys():
return ("name, type_id, is_liquid, lab, min_temp, max_temp, "
"flocculation, attenuation, notes")
def __str__(self):
return "{0},{1},{2},{3},{4},{5},{6},{7},{8}".format(
self.name,
self.type_id,
self.is_liquid,
self.lab,
self.min_temp,
self.max_temp,
self.flocculation,
self.attenuation,
self.notes,
)
def get_yeast(s, d, stdout):
"""
Gets yeast from the source database (s), transforms them,
and puts them in the destination database (d)
"""
n = 0
d.execute('DROP TABLE IF EXISTS yeasttype;')
d.execute('DROP TABLE IF EXISTS yeast;')
d.execute('CREATE TABLE yeasttype(name TEXT);')
d.execute('CREATE TABLE yeast(' \
'name TEXT,' \
'type_id int,' \
'is_liquid int,' \
'lab TEXT,' \
'min_temp FLOAT,' \
'max_temp FLOAT,' \
'flocculation FLOAT,' \
'attenuation FLOAT,' \
'notes TEXT' \
');'
)
s.execute('SELECT "name", "ytype", "form", "laboratory", "min_temperature", "max_temperature", "flocculation", "attenuation", "notes" FROM yeast WHERE `deleted`=0;')
cur = s.fetchone()
while cur:
y = Yeast(cur)
        # check for the yeast type and set its foreign id
y.type_id = 'NULL'
if y.type is not 'NULL':
d.execute('SELECT `rowid` FROM yeasttype WHERE name={0};'.format(y.type))
yeast_type_id = d.fetchone()
if yeast_type_id is None:
d.execute('INSERT INTO yeasttype(name) VALUES ({0});'.format(y.type))
d.execute('SELECT `rowid` FROM yeasttype WHERE name={0};'.format(y.type))
yeast_type_id = d.fetchone()
y.type_id = yeast_type_id[0] if yeast_type_id else 'NULL'
d.execute('INSERT INTO yeast({0}) VALUES({1});'.format(Yeast.get_keys(), y))
n+=1
cur = s.fetchone()
print("Found {0} yeast.".format(n))
| gpl-3.0 | -3,451,735,403,440,097,000 | 31.688172 | 170 | 0.500329 | false |
henriquegemignani/randovania | test/server/test_server_app.py | 1 | 5033 | from unittest.mock import MagicMock, call
import flask
import pytest
from randovania.network_common.error import NotLoggedIn, ServerError, InvalidSession
from randovania.server import database
from randovania.server.server_app import ServerApp
@pytest.fixture(name="server_app")
def server_app_fixture(flask_app, skip_qtbot):
flask_app.config['SECRET_KEY'] = "key"
flask_app.config["DISCORD_CLIENT_ID"] = 1234
flask_app.config["DISCORD_CLIENT_SECRET"] = 5678
flask_app.config["DISCORD_REDIRECT_URI"] = "http://127.0.0.1:5000/callback/"
flask_app.config["FERNET_KEY"] = b's2D-pjBIXqEqkbeRvkapeDn82MgZXLLQGZLTgqqZ--A='
flask_app.config["GUEST_KEY"] = b's2D-pjBIXqEqkbeRvkapeDn82MgZXLLQGZLTgqqZ--A='
server = ServerApp(flask_app)
server.metrics.summary = MagicMock()
server.metrics.summary.return_value.side_effect = lambda x: x
return server
def test_session(server_app):
server_app.sio = MagicMock()
with server_app.app.test_request_context():
flask.request.sid = 1234
result = server_app.session()
assert result == server_app.sio.server.session.return_value
server_app.sio.server.session.assert_called_once_with(1234, namespace=None)
def test_get_session(server_app):
server_app.sio = MagicMock()
with server_app.app.test_request_context():
flask.request.sid = 1234
result = server_app.get_session()
assert result == server_app.sio.server.get_session.return_value
server_app.sio.server.get_session.assert_called_once_with(1234, namespace=None)
def test_get_current_user_ok(server_app, clean_database):
server_app.get_session = MagicMock(return_value={"user-id": 1234})
user = database.User.create(id=1234, name="Someone")
# Run
result = server_app.get_current_user()
# Assert
assert result == user
def test_get_current_user_not_logged(server_app, clean_database):
server_app.get_session = MagicMock(return_value={})
# Run
with pytest.raises(NotLoggedIn):
server_app.get_current_user()
def test_get_current_user_unknown_user(server_app, clean_database):
server_app.get_session = MagicMock(return_value={"user-id": 1234})
# Run
with pytest.raises(InvalidSession):
server_app.get_current_user()
def test_join_game_session(mocker, server_app):
mock_join_room = mocker.patch("flask_socketio.join_room")
membership = MagicMock()
membership.session.id = "session_id"
membership.user.id = "user_id"
session = {}
server_app.session = MagicMock()
server_app.session.return_value.__enter__.return_value = session
# Run
server_app.join_game_session(membership)
# Assert
mock_join_room.assert_has_calls([
call("game-session-session_id"),
call("game-session-session_id-user_id"),
])
assert session == {
"current_game_session": "session_id",
}
def test_leave_game_session_with_session(mocker, server_app):
# Setup
mock_leave_room = mocker.patch("flask_socketio.leave_room")
user = MagicMock()
user.id = "user_id"
server_app.get_current_user = lambda: user
session = {"current_game_session": "session_id"}
server_app.session = MagicMock()
server_app.session.return_value.__enter__.return_value = session
# Run
server_app.leave_game_session()
# Assert
mock_leave_room.assert_has_calls([
call("game-session-session_id"),
call("game-session-session_id-user_id"),
])
assert session == {}
def test_leave_game_session_without_session(mocker, server_app):
# Setup
mock_leave_room: MagicMock = mocker.patch("flask_socketio.leave_room")
server_app.session = MagicMock()
server_app.session.return_value.__enter__.return_value = {}
# Run
server_app.leave_game_session()
# Assert
mock_leave_room.assert_not_called()
def test_on_success_ok(server_app):
# Setup
custom = MagicMock(return_value={"foo": 12345})
server_app.on("custom", custom)
# Run
test_client = server_app.sio.test_client(server_app.app)
result = test_client.emit("custom", callback=True)
# Assert
custom.assert_called_once_with(server_app)
assert result == {"result": {"foo": 12345}}
def test_on_success_network_error(server_app):
# Setup
error = NotLoggedIn()
custom = MagicMock(side_effect=error)
server_app.on("custom", custom)
# Run
test_client = server_app.sio.test_client(server_app.app)
result = test_client.emit("custom", callback=True)
# Assert
custom.assert_called_once_with(server_app)
assert result == error.as_json
def test_on_success_exception(server_app):
# Setup
custom = MagicMock(side_effect=RuntimeError("something happened"))
server_app.on("custom", custom)
# Run
test_client = server_app.sio.test_client(server_app.app)
result = test_client.emit("custom", callback=True)
# Assert
custom.assert_called_once_with(server_app)
assert result == ServerError().as_json
| gpl-3.0 | -6,865,997,746,060,627,000 | 28.261628 | 84 | 0.680707 | false |
TataneInYourFace/wefill | app/forms/order_form.py | 1 | 1471 | # -*- coding: utf-8 -*-
import datetime
from django import forms
GAS_QUANTITY = (
('20', '20 Litres'),
('25', '25 Litres'),
('30', '30 Litres'),
('35', '35 Litres'),
('40', '40 Litres'),
('45', '45 Litres'),
('50', '50 Litres'),
('55', '55 Litres'),
('60', '60 Litres'),
)
class OrderForm(forms.Form):
user = forms.IntegerField(widget=forms.HiddenInput(), required=False)
address = forms.ChoiceField()
vehicle = forms.ChoiceField()
gas_name = forms.ChoiceField()
gas_quantity = forms.ChoiceField(widget=forms.Select(attrs={'class':'selectpicker'}), choices=GAS_QUANTITY)
date_refill = forms.DateTimeField(widget=forms.HiddenInput())
def __init__(self, data=None, addresses=None, vehicles=None, gas_choices=None, *args, **kwargs):
super(OrderForm, self).__init__(data, *args, **kwargs)
if addresses is not None:
self.fields['address'] = forms.ChoiceField(
choices=[(str(address['id']), address['name']) for address in addresses]
)
if vehicles is not None:
self.fields['vehicle'] = forms.ChoiceField(
choices=[(str(vehicle['id']), vehicle['name']) for vehicle in vehicles]
)
if gas_choices is not None:
self.fields['gas_name'] = forms.ChoiceField(
choices=[(gas['name'], '{0} - {1} €/L'.format(gas['name'], gas['price'])) for gas in gas_choices]
)
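# Example with hypothetical data, showing how the dynamic choice fields are built:
#   OrderForm(addresses=[{'id': 1, 'name': 'Home'}],
#             vehicles=[{'id': 3, 'name': 'Clio'}],
#             gas_choices=[{'name': 'SP95', 'price': '1.45'}])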
| mit | 3,933,943,561,269,895,700 | 33.97619 | 113 | 0.577263 | false |
CoBiG2/RAD_Tools | segregating_loci_finder.py | 1 | 3130 | #!/usr/bin/env python3
# Copyright 2018 Francisco Pina Martins <[email protected]>
# segregating_loci_finder.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# segregating_loci_finder.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with segregating_loci_finder.py. If not, see <http://www.gnu.org/licenses/>.
# This script compares two groups of individuals and highlights any
# loci that segregate the two groups.
# Usage: python3 segregating_loci_finder.py /path/to/file.vcf \
# number_of_1st_group_individuals(int)
import re
from collections import Counter
def vcf_parser(vcf_filename, group_split_point):
"""
Parses a vcf file and returns TODO
"""
infile = open(vcf_filename, 'r')
loci = {}
group_split_point = int(group_split_point)
for lines in infile:
if lines.startswith("#"): # Skip headers
if lines.startswith("#CHROM"): # Group checker lines
lines = lines.split()
data = lines[9:]
groups = [data[:group_split_point],
data[group_split_point:]]
print(groups)
else:
lines = lines.split()
locus = lines[0]
data = lines[9:]
groups = [data[:group_split_point], data[group_split_point:]]
gr_freqs = [get_freqs(x) for x in groups]
loci[locus] = gr_freqs
return loci
def get_freqs(vcf_data):
"""
Gets relative frequencies from VCF data
"""
abs_freqs = [re.match(".*?:", x).group(0)[:-1] for x in vcf_data]
dummy_freqs = {"0/0": 0, "0/1": 0, "1/0": 0, "1/1": 0, "./.": 0}
rel_freqs = Counter(abs_freqs)
try:
mvs = rel_freqs.pop("./.")
except KeyError:
mvs = 0
dummy_freqs.update(Counter(abs_freqs))
rel_freqs = dummy_freqs
rel_freqs["0/1"] += rel_freqs.pop("1/0")
try:
non_missing = len(abs_freqs) - mvs
rel_freqs = {k: v/non_missing for k, v in rel_freqs.items()}
except ZeroDivisionError:
rel_freqs = None
# print(rel_freqs)
return rel_freqs
def segregating_freqs(loci):
"""
Defines wether a locus segregates the two groups
For now only works with full segregation
"""
segregators = []
for locus, data in loci.items():
try:
segregators += [locus for k, v in data[0].items()
if (data[1][k] == 0 and v == 1)
or (data[1][k] == 1 and v == 0)]
except AttributeError:
pass
return segregators
if __name__ == "__main__":
from sys import argv
SEG_LOCI = vcf_parser(argv[1], argv[2])
for i in segregating_freqs(SEG_LOCI):
print(i)
| gpl-3.0 | -5,112,938,407,795,405,000 | 30.938776 | 84 | 0.602875 | false |
mjafin/bcbio-nextgen | bcbio/variation/validate.py | 1 | 28334 | """Perform validation of final calls against known reference materials.
Automates the process of checking pipeline results against known valid calls
to identify discordant variants. This provides a baseline for ensuring the
validity of pipeline updates and algorithm changes.
"""
import collections
import contextlib
import csv
import os
import shutil
import subprocess
import time
from pysam import VariantFile
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.bam import callable
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import bubbletree
from bcbio.pipeline import config_utils, shared
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bedutils, validateplot, vcfutils, multi, naming
# ## Individual sample comparisons
def _get_validate(data):
"""Retrieve items to validate, from single samples or from combined joint calls.
"""
if data.get("vrn_file") and tz.get_in(["config", "algorithm", "validate"], data):
return data
elif "group_orig" in data:
for sub in multi.get_orig_items(data):
if "validate" in sub["config"]["algorithm"]:
sub_val = utils.deepish_copy(sub)
sub_val["vrn_file"] = data["vrn_file"]
return sub_val
return None
def normalize_input_path(x, data):
"""Normalize path for input files, handling relative paths.
Looks for non-absolute paths in local and fastq directories
"""
if x is None:
return None
elif os.path.isabs(x):
return os.path.normpath(x)
else:
for d in [data["dirs"].get("fastq"), data["dirs"].get("work")]:
if d:
cur_x = os.path.normpath(os.path.join(d, x))
if os.path.exists(cur_x):
return cur_x
raise IOError("Could not find validation file %s" % x)
def _gunzip(f, data):
if f is None:
return None
elif f.endswith(".gz"):
out_file = f.replace(".gz", "")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "gunzip -c {f} > {tx_out_file}"
do.run(cmd.format(**locals()), "gunzip input file")
return out_file
else:
return f
def _get_caller(data):
callers = [tz.get_in(["config", "algorithm", "jointcaller"], data),
tz.get_in(["config", "algorithm", "variantcaller"], data),
"precalled"]
return [c for c in callers if c][0]
def _get_caller_supplement(caller, data):
"""Some callers like MuTect incorporate a second caller for indels.
"""
if caller == "mutect":
icaller = tz.get_in(["config", "algorithm", "indelcaller"], data)
if icaller:
caller = "%s/%s" % (caller, icaller)
return caller
def _normalize_cwl_inputs(items):
"""Extract variation and validation data from CWL input list of batched samples.
"""
with_validate = []
vrn_files = []
for data in items:
if tz.get_in(["config", "algorithm", "validate"], data):
with_validate.append(data)
if data.get("vrn_file"):
vrn_files.append(data["vrn_file"])
if len(with_validate) == 0:
return items[0]
else:
assert len(set([tz.get_in(["config", "algorithm", "validate"], data) for data in with_validate])) == 1
assert len(set(vrn_files)) == 1
data = with_validate[0]
data["vrn_file"] = vrn_files[0]
return data
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)):
data = _normalize_cwl_inputs(data)
toval_data = _get_validate(data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data,
bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(data), data["genome_build"], base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(data),
data["genome_build"], base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
if not vcfutils.vcf_has_variants(vrn_file):
# RTG can fail on totally empty files. Skip these since we have nothing.
pass
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "rtg":
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "hap.py":
data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
return [[data]]
# ## Empty truth sets
def _setup_call_fps(vrn_file, rm_bed, base_dir, data):
"""Create set of false positives for inputs with empty truth sets.
"""
out_file = os.path.join(base_dir, "fp.vcf.gz")
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = ("bcftools view -R {rm_bed} -f 'PASS,.' {vrn_file} -O z -o {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare false positives with empty reference", data)
return {"fp": out_file}
# ## Real Time Genomics vcfeval
def _get_sample_and_caller(data):
return [tz.get_in(["metadata", "validate_sample"], data) or dd.get_sample_name(data),
_get_caller_supplement(_get_caller(data), data)]
def _rtg_add_summary_file(eval_files, base_dir, data):
"""Parse output TP FP and FN files to generate metrics for plotting.
"""
out_file = os.path.join(base_dir, "validate-summary.csv")
if not utils.file_uptodate(out_file, eval_files.get("tp", eval_files["fp"])):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
base = _get_sample_and_caller(data)
for metric in ["tp", "fp", "fn"]:
for vtype, bcftools_types in [("SNPs", "--types snps"),
("Indels", "--exclude-types snps")]:
in_file = eval_files.get(metric)
if in_file and os.path.exists(in_file):
cmd = ("bcftools view {bcftools_types} {in_file} | grep -v ^# | wc -l")
count = int(subprocess.check_output(cmd.format(**locals()), shell=True))
else:
count = 0
writer.writerow(base + [vtype, metric, count])
eval_files["summary"] = out_file
return eval_files
def _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Prepare input VCF and BED files for validation.
"""
if not rm_file.endswith(".vcf.gz") or not os.path.exists(rm_file + ".tbi"):
rm_file = vcfutils.bgzip_and_index(rm_file, data["config"], out_dir=base_dir)
if len(vcfutils.get_samples(vrn_file)) > 1:
base, ext = utils.splitext_plus(os.path.basename(vrn_file))
sample_file = os.path.join(base_dir, "%s-%s%s" % (base, dd.get_sample_name(data), ext))
vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_file, data["config"])
if not vrn_file.endswith(".vcf.gz") or not os.path.exists(vrn_file + ".tbi"):
vrn_file = vcfutils.bgzip_and_index(vrn_file, data["config"], out_dir=base_dir)
interval_bed = _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data)
return vrn_file, rm_file, interval_bed
def _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Run evaluation of a caller against the truth set using rtg vcfeval.
"""
out_dir = os.path.join(base_dir, "rtg")
if not utils.file_exists(os.path.join(out_dir, "done")):
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data)
rtg_ref = tz.get_in(["reference", "rtg"], data)
assert rtg_ref and os.path.exists(rtg_ref), ("Did not find rtg indexed reference file for validation:\n%s\n"
"Run bcbio_nextgen.py upgrade --data --aligners rtg" % rtg_ref)
# handle CWL where we have a reference to a single file in the RTG directory
if os.path.isfile(rtg_ref):
rtg_ref = os.path.dirname(rtg_ref)
# get core and memory usage from standard configuration
threads = min(dd.get_num_cores(data), 6)
resources = config_utils.get_resources("rtg", data["config"])
memory = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms500m", "-Xmx1500m"]),
{"algorithm": {"memory_adjust": {"magnitude": threads,
"direction": "increase"}}})
jvm_stack = [x for x in memory if x.startswith("-Xms")]
jvm_mem = [x for x in memory if x.startswith("-Xmx")]
jvm_stack = jvm_stack[0] if len(jvm_stack) > 0 else "-Xms500m"
jvm_mem = jvm_mem[0].replace("-Xmx", "") if len(jvm_mem) > 0 else "3g"
cmd = ["rtg", "vcfeval", "--threads", str(threads),
"-b", rm_file, "--bed-regions", interval_bed,
"-c", vrn_file, "-t", rtg_ref, "-o", out_dir]
cmd += ["--vcf-score-field='%s'" % (_pick_best_quality_score(vrn_file))]
mem_export = "%s export RTG_JAVA_OPTS='%s' && export RTG_MEM=%s" % (utils.local_path_export(),
jvm_stack, jvm_mem)
cmd = mem_export + " && " + " ".join(cmd)
do.run(cmd, "Validate calls using rtg vcfeval", data)
out = {"fp": os.path.join(out_dir, "fp.vcf.gz"),
"fn": os.path.join(out_dir, "fn.vcf.gz")}
tp_calls = os.path.join(out_dir, "tp.vcf.gz")
tp_baseline = os.path.join(out_dir, "tp-baseline.vcf.gz")
if os.path.exists(tp_baseline):
out["tp"] = tp_baseline
out["tp-calls"] = tp_calls
else:
out["tp"] = tp_calls
return out
def _pick_best_quality_score(vrn_file):
"""Flexible quality score selection, picking the best available.
Implementation based on discussion:
https://github.com/chapmanb/bcbio-nextgen/commit/a538cecd86c0000d17d3f9d4f8ac9d2da04f9884#commitcomment-14539249
(RTG=AVR/GATK=VQSLOD/MuTect=t_lod_fstar, otherwise GQ, otherwise QUAL, otherwise DP.)
For MuTect, it's not clear how to get t_lod_fstar, the right quality score, into VCF cleanly.
MuTect2 has TLOD in the INFO field.
"""
# pysam fails on checking reference contigs if input is empty
if not vcfutils.vcf_has_variants(vrn_file):
return "DP"
to_check = 25
scores = collections.defaultdict(int)
try:
in_handle = VariantFile(vrn_file)
except ValueError:
raise ValueError("Failed to parse input file in preparation for validation: %s" % vrn_file)
with contextlib.closing(in_handle) as val_in:
for i, rec in enumerate(val_in):
if i > to_check:
break
if rec.info.get("VQSLOD") is not None:
scores["INFO=VQSLOD"] += 1
if rec.info.get("TLOD") is not None:
scores["INFO=TLOD"] += 1
for skey in ["AVR", "GQ", "DP"]:
if len(rec.samples) > 0 and rec.samples[0].get(skey) is not None:
scores[skey] += 1
if rec.qual:
scores["QUAL"] += 1
for key in ["AVR", "INFO=VQSLOD", "INFO=TLOD", "GQ", "QUAL", "DP"]:
if scores[key] > 0:
return key
raise ValueError("Did not find quality score for validation from %s" % vrn_file)
def _get_merged_intervals(rm_interval_file, vrn_file, base_dir, data):
"""Retrieve intervals to run validation on, merging reference and callable BED files.
"""
a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
if a_intervals:
final_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
caller = _get_caller(data)
sample = dd.get_sample_name(data)
combo_intervals = os.path.join(base_dir, "%s-%s-%s-wrm.bed" %
(utils.splitext_plus(os.path.basename(final_intervals))[0],
sample, caller))
if not utils.file_uptodate(combo_intervals, final_intervals):
with file_transaction(data, combo_intervals) as tx_out_file:
with utils.chdir(os.path.dirname(tx_out_file)):
# Copy files locally to avoid issues on shared filesystems
# where BEDtools has trouble accessing the same base
# files from multiple locations
a = os.path.basename(final_intervals)
b = os.path.basename(rm_interval_file)
try:
shutil.copyfile(final_intervals, a)
except IOError:
time.sleep(60)
shutil.copyfile(final_intervals, a)
try:
shutil.copyfile(rm_interval_file, b)
except IOError:
time.sleep(60)
shutil.copyfile(rm_interval_file, b)
cmd = ("bedtools intersect -nonamecheck -a {a} -b {b} > {tx_out_file}")
do.run(cmd.format(**locals()), "Intersect callable intervals for rtg vcfeval")
final_intervals = combo_intervals
else:
assert rm_interval_file, "No intervals to subset analysis with"
final_intervals = shared.remove_lcr_regions(rm_interval_file, [data])
return final_intervals
def _callable_from_gvcf(data, vrn_file, out_dir):
"""Retrieve callable regions based on ref call regions in gVCF.
Uses https://github.com/lijiayong/gvcf_regions
"""
methods = {"freebayes": "freebayes", "platypus": "platypus",
"gatk-haplotype": "gatk"}
gvcf_type = methods.get(dd.get_variantcaller(data))
if gvcf_type:
out_file = os.path.join(out_dir, "%s-gcvf-coverage.bed" %
utils.splitext_plus(os.path.basename(vrn_file))[0])
if not utils.file_uptodate(out_file, vrn_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = ("gvcf_regions.py --gvcf_type {gvcf_type} {vrn_file} "
"| bedtools merge > {tx_out_file}")
do.run(cmd.format(**locals()), "Convert gVCF to BED file of callable regions")
return out_file
def get_analysis_intervals(data, vrn_file, base_dir):
"""Retrieve analysis regions for the current variant calling pipeline.
"""
if vrn_file and "gvcf" in dd.get_tools_on(data):
callable_bed = _callable_from_gvcf(data, vrn_file, base_dir)
if callable_bed:
return callable_bed
if data.get("ensemble_bed"):
return data["ensemble_bed"]
elif dd.get_callable_regions(data):
return dd.get_callable_regions(data)
elif data.get("align_bam"):
return callable.sample_callable_bed(data["align_bam"], dd.get_ref_file(data), data)
elif data.get("work_bam"):
return callable.sample_callable_bed(data["work_bam"], dd.get_ref_file(data), data)
elif data.get("work_bam_callable"):
return callable.sample_callable_bed(data["work_bam_callable"], dd.get_ref_file(data), data)
elif tz.get_in(["config", "algorithm", "callable_regions"], data):
return tz.get_in(["config", "algorithm", "callable_regions"], data)
elif tz.get_in(["config", "algorithm", "variant_regions"], data):
return tz.get_in(["config", "algorithm", "variant_regions"], data)
# ## hap.py
def _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Validation with hap.py: https://github.com/Illumina/hap.py
XXX Does not yet parse out metrics for plotting.
"""
out_dir = utils.safe_makedir(os.path.join(base_dir, "happy"))
out_prefix = os.path.join(out_dir, "val")
if not utils.file_exists(out_prefix + ".summary.csv"):
vrn_file, rm_file, interval_bed = _prepare_inputs(vrn_file, rm_file, rm_interval_file, base_dir, data)
cmd = ["hap.py", "-V", "-f", interval_bed, "-r", dd.get_ref_file(data),
"-l", ",".join(_get_location_list(interval_bed)),
"-o", out_prefix, rm_file, vrn_file]
do.run(cmd, "Validate calls using hap.py", data)
return {"vcf": out_prefix + ".vcf.gz"}
def _get_location_list(interval_bed):
"""Retrieve list of locations to analyze from input BED file.
"""
import pybedtools
regions = collections.OrderedDict()
for region in pybedtools.BedTool(interval_bed):
regions[str(region.chrom)] = None
return regions.keys()
# ## bcbio.variation comparison -- deprecated approach
def _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, data):
"""Run validation of a caller against the truth set using bcbio.variation.
"""
val_config_file = _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data)
work_dir = os.path.join(base_dir, "work")
out = {"summary": os.path.join(work_dir, "validate-summary.csv"),
"grading": os.path.join(work_dir, "validate-grading.yaml"),
"discordant": os.path.join(work_dir, "%s-eval-ref-discordance-annotate.vcf" % sample)}
if not utils.file_exists(out["discordant"]) or not utils.file_exists(out["grading"]):
bcbio_variation_comparison(val_config_file, base_dir, data)
out["concordant"] = filter(os.path.exists,
[os.path.join(work_dir, "%s-%s-concordance.vcf" % (sample, x))
for x in ["eval-ref", "ref-eval"]])[0]
return out
def bcbio_variation_comparison(config_file, base_dir, data):
"""Run a variant comparison using the bcbio.variation toolkit, given an input configuration.
"""
tmp_dir = utils.safe_makedir(os.path.join(base_dir, "tmp"))
resources = config_utils.get_resources("bcbio_variation", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])
cmd = ["bcbio-variation"] + jvm_opts + broad.get_default_jvm_opts(tmp_dir) + \
["variant-compare", config_file]
do.run(cmd, "Comparing variant calls using bcbio.variation", data)
def _create_validate_config_file(vrn_file, rm_file, rm_interval_file,
base_dir, data):
config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
config_file = os.path.join(config_dir, "validate.yaml")
if not utils.file_uptodate(config_file, vrn_file):
with file_transaction(data, config_file) as tx_config_file:
with open(tx_config_file, "w") as out_handle:
out = _create_validate_config(vrn_file, rm_file, rm_interval_file,
base_dir, data)
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
return config_file
def _create_validate_config(vrn_file, rm_file, rm_interval_file, base_dir, data):
"""Create a bcbio.variation configuration input for validation.
"""
ref_call = {"file": str(rm_file), "name": "ref", "type": "grading-ref",
"fix-sample-header": True, "remove-refcalls": True}
a_intervals = get_analysis_intervals(data, vrn_file, base_dir)
if a_intervals:
a_intervals = shared.remove_lcr_regions(a_intervals, [data])
if rm_interval_file:
ref_call["intervals"] = rm_interval_file
eval_call = {"file": vrn_file, "name": "eval", "remove-refcalls": True}
exp = {"sample": data["name"][-1],
"ref": dd.get_ref_file(data),
"approach": "grade",
"calls": [ref_call, eval_call]}
if a_intervals:
exp["intervals"] = os.path.abspath(a_intervals)
if data.get("align_bam"):
exp["align"] = data["align_bam"]
elif data.get("work_bam"):
exp["align"] = data["work_bam"]
return {"dir": {"base": base_dir, "out": "work", "prep": "work/prep"},
"experiments": [exp]}
# ## Summarize comparisons
def _flatten_grading(stats):
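    # Yield (variant type, category, count) rows for concordant and discordant calls.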
vtypes = ["snp", "indel"]
cat = "concordant"
for vtype in vtypes:
yield vtype, cat, stats[cat][cat].get(vtype, 0)
for vtype in vtypes:
for vclass, vitems in sorted(stats["discordant"].get(vtype, {}).iteritems()):
for vreason, val in sorted(vitems.iteritems()):
yield vtype, "discordant-%s-%s" % (vclass, vreason), val
yield vtype, "discordant-%s-total" % vclass, sum(vitems.itervalues())
def _has_grading_info(samples):
for data in (x[0] for x in samples):
for variant in data.get("variants", []):
if variant.get("validate"):
return True
return False
def _group_validate_samples(samples):
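    # Split samples into those that carry validation results, grouped by validation
    # batch (or sample description), and the remaining samples which pass through as-is.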
extras = []
validated = collections.defaultdict(list)
for data in (x[0] for x in samples):
is_v = False
for variant in data.get("variants", []):
if variant.get("validate"):
is_v = True
if is_v:
for batch_key in (["metadata", "validate_batch"], ["metadata", "batch"],
["description"]):
vname = tz.get_in(batch_key, data)
if vname:
break
if isinstance(vname, (list, tuple)):
vname = vname[0]
validated[vname].append(data)
else:
extras.append([data])
return validated, extras
def summarize_grading(samples):
"""Provide summaries of grading results across all samples.
"""
if not _has_grading_info(samples):
return samples
validate_dir = utils.safe_makedir(os.path.join(samples[0][0]["dirs"]["work"], "validate"))
header = ["sample", "caller", "variant.type", "category", "value"]
validated, out = _group_validate_samples(samples)
for vname, vitems in validated.iteritems():
out_csv = os.path.join(validate_dir, "grading-summary-%s.csv" % vname)
with open(out_csv, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(header)
plot_data = []
plot_files = []
for data in sorted(vitems, key=lambda x: x.get("lane", dd.get_sample_name(x))):
for variant in data.get("variants", []):
if variant.get("validate"):
variant["validate"]["grading_summary"] = out_csv
if tz.get_in(["validate", "grading"], variant):
for row in _get_validate_plotdata_yaml(variant, data):
writer.writerow(row)
plot_data.append(row)
elif tz.get_in(["validate", "summary"], variant):
plot_files.append(variant["validate"]["summary"])
if plot_files:
plots = validateplot.classifyplot_from_plotfiles(plot_files, out_csv)
elif plot_data:
plots = validateplot.create(plot_data, header, 0, data["config"],
os.path.splitext(out_csv)[0])
else:
plots = None
for data in vitems:
for variant in data.get("variants", []):
if variant.get("validate"):
variant["validate"]["grading_plots"] = plots
out.append([data])
return out
def _get_validate_plotdata_yaml(variant, data):
"""Retrieve validation plot data from grading YAML file (old style).
"""
with open(variant["validate"]["grading"]) as in_handle:
grade_stats = yaml.load(in_handle)
for sample_stats in grade_stats:
sample = sample_stats["sample"]
for vtype, cat, val in _flatten_grading(sample_stats):
yield [sample, variant.get("variantcaller", ""),
vtype, cat, val]
# ## Summarize by frequency
def freq_summary(val_file, call_file, truth_file, target_name):
"""Summarize true and false positive calls by variant type and frequency.
Resolve differences in true/false calls based on output from hap.py:
https://github.com/sequencing/hap.py
"""
out_file = "%s-freqs.csv" % utils.splitext_plus(val_file)[0]
truth_freqs = _read_truth_freqs(truth_file)
call_freqs = _read_call_freqs(call_file, target_name)
with VariantFile(val_file) as val_in:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["vtype", "valclass", "freq"])
for rec in val_in:
call_type = _classify_rec(rec)
val_type = _get_validation_status(rec)
key = _get_key(rec)
freq = truth_freqs.get(key, call_freqs.get(key, 0.0))
writer.writerow([call_type, val_type, freq])
return out_file
def _get_key(rec):
return (rec.contig, rec.pos, rec.ref, rec.alts[0])
def _classify_rec(rec):
"""Determine class of variant in the record.
"""
if max([len(x) for x in rec.alleles]) == 1:
return "snp"
else:
return "indel"
def _get_validation_status(rec):
"""Retrieve the status of the validation, supporting hap.py output
"""
return rec.info["type"]
def _read_call_freqs(in_file, sample_name):
"""Identify frequencies for calls in the input file.
"""
out = {}
with VariantFile(in_file) as call_in:
for rec in call_in:
if rec.filter.keys() == ["PASS"]:
for name, sample in rec.samples.items():
if name == sample_name:
alt, depth, freq = bubbletree.sample_alt_and_depth(rec, sample)
if freq is not None:
out[_get_key(rec)] = freq
return out
def _read_truth_freqs(in_file):
"""Read frequency of calls from truth VCF.
Currently handles DREAM data, needs generalization for other datasets.
"""
out = {}
with VariantFile(in_file) as bcf_in:
for rec in bcf_in:
freq = float(rec.info.get("VAF", 1.0))
out[_get_key(rec)] = freq
return out
| mit | 3,780,586,064,425,050,600 | 45.297386 | 116 | 0.581986 | false |
xavi783/u-tad | Modulo8/modules/data/main.py | 1 | 1377 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 28 17:28:20 2014
@author: xavi783
"""
import json
import numpy as np
import pandas as pd
import pandas.io.data as web
import datetime as dt
from tornado.web import RequestHandler
START_DATE=dt.datetime(2000,1,1)
NAMES = ['AAPL','XOM','MSFT','JNJ','BRK.B','WFC','GE','PG','JPM','PFE']
symbols = pd.concat([web.get_data_yahoo(i, START_DATE)['Adj Close'] for i in NAMES],1)
symbols.columns = NAMES
symbols.index = [i.date() for i in list(symbols.index)]
symbols.index.names = ["date"]
panel_corr = pd.rolling_corr(symbols.pct_change(),21)
dates = np.array(map(lambda d: d.toordinal(), symbols.index))
class StockHandler(RequestHandler):
def get(self):
self.write(symbols.to_csv())
self.finish()
class CorrelationHandler(RequestHandler):
encoder = json.JSONEncoder()
def get_correlation(self,*date):
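        # Helper lambdas: map the requested calendar date onto the nearest trading
        # date present in the price index, then look up that day's matrix from the
        # precomputed rolling-correlation panel.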
        f = lambda x: x[x<0][-1]
find_date = lambda d,dates: list(np.argwhere(f((dates-dt.datetime(*d).toordinal()))==(dates-dt.datetime(*d).toordinal())).flat)[0]
get_date = lambda d,dates: symbols.ix[find_date(d,dates)+[1,2],:].index[0]
return json.dumps((panel_corr[get_date(date,dates)].values).tolist())
def post(self):
fecha = tuple([int(i) for i in self.request.body.split("-")])
self.write(self.encoder.encode(self.get_correlation(*fecha))) | gpl-3.0 | -2,924,266,324,681,359,400 | 31.809524 | 138 | 0.660857 | false |
samatt/OpenWPM | automation/platform_utils.py | 1 | 5891 | from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from pyvirtualdisplay import Display
from collections import OrderedDict
from selenium import webdriver
from tabulate import tabulate
from copy import deepcopy
import subprocess
import shutil
import json
import time
import sys
import os
def get_version():
"""Return OpenWPM version tag/current commit and Firefox version """
openwpm = subprocess.check_output(["git","describe","--tags"]).strip()
ff_ini = os.path.join(os.path.dirname(__file__), '../firefox-bin/application.ini')
with open(ff_ini, 'r') as f:
ff = None
for line in f:
if line.startswith('Version='):
ff = line[8:].strip()
break
return openwpm, ff
def get_configuration_string(manager_params, browser_params, versions):
"""Construct a well-formatted string for {manager,browser}params
Constructs a pretty printed string of all parameters. The config
dictionaries are split to try to avoid line wrapping for reasonably
    sized terminal windows.
"""
config_str = "\n\nOpenWPM Version: %s\nFirefox Version: %s\n" % versions
config_str += "\n========== Manager Configuration ==========\n"
config_str += json.dumps(manager_params, sort_keys=True,
indent=2, separators=(',', ': '))
config_str += "\n\n========== Browser Configuration ==========\n"
print_params = [deepcopy(x) for x in browser_params]
ext_settings = list()
table_input = list()
profile_dirs = OrderedDict()
archive_dirs = OrderedDict()
extension_all_disabled = profile_all_none = archive_all_none = True
for item in print_params:
crawl_id = item['crawl_id']
# Update print flags
if item['profile_tar'] is not None:
profile_all_none = False
if item['profile_archive_dir'] is not None:
archive_all_none = False
if item['extension']['enabled']:
extension_all_disabled = False
# Separate out extension settings
dct = OrderedDict()
dct['crawl_id'] = crawl_id
dct.update(item.pop('extension'))
ext_settings.append(dct)
# Separate out long profile directory strings
profile_dirs[crawl_id] = item.pop('profile_tar')
archive_dirs[crawl_id] = item.pop('profile_archive_dir')
# Copy items in sorted order
dct = OrderedDict()
dct[u'crawl_id'] = crawl_id
for key in sorted(item.keys()):
dct[key] = item[key]
table_input.append(dct)
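    # Record the column order so it can be printed as a numeric key legend above
    # the tabulated browser configurations.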
key_dict = OrderedDict()
counter = 0
for key in table_input[0].keys():
key_dict[key] = counter
counter += 1
config_str += "Keys:\n"
config_str += json.dumps(key_dict, indent=2,
separators=(',', ': '))
config_str += '\n\n'
config_str += tabulate(table_input, headers=key_dict)
config_str += "\n\n========== Extension Configuration ==========\n"
if extension_all_disabled:
config_str += " No extensions enabled"
else:
config_str += tabulate(ext_settings, headers="keys")
config_str += "\n\n========== Input profile tar files ==========\n"
if profile_all_none:
config_str += " No profile tar files specified"
else:
config_str += json.dumps(profile_dirs, indent=2,
separators=(',', ': '))
config_str += "\n\n========== Output (archive) profile directories ==========\n"
if archive_all_none:
config_str += " No profile archive directories specified"
else:
config_str += json.dumps(archive_dirs, indent=2,
separators=(',', ': '))
config_str += '\n\n'
return config_str
def fetch_adblockplus_list(output_directory, wait_time=20):
""" Saves an updated AdBlock Plus list to the specified directory.
<output_directory> - The directory to save lists to. Will be created if it
does not already exist.
"""
output_directory = os.path.expanduser(output_directory)
# Start a virtual display
display = Display(visible=0)
display.start()
root_dir = os.path.dirname(__file__)
fb = FirefoxBinary(os.path.join(root_dir,'../firefox-bin/firefox'))
fp = webdriver.FirefoxProfile()
browser_path = fp.path + '/'
# Enable AdBlock Plus - Uses "Easy List" by default
# "Allow some non-intrusive advertising" disabled
fp.add_extension(extension=os.path.join(root_dir,'DeployBrowsers/firefox_extensions/adblock_plus-2.7.xpi'))
fp.set_preference('extensions.adblockplus.subscriptions_exceptionsurl', '')
fp.set_preference('extensions.adblockplus.subscriptions_listurl', '')
fp.set_preference('extensions.adblockplus.subscriptions_fallbackurl', '')
fp.set_preference('extensions.adblockplus.subscriptions_antiadblockurl', '')
fp.set_preference('extensions.adblockplus.suppress_first_run_page', True)
fp.set_preference('extensions.adblockplus.notificationurl', '')
# Force pre-loading so we don't allow some ads through
fp.set_preference('extensions.adblockplus.please_kill_startup_performance', True)
print "Starting webdriver with AdBlockPlus activated"
driver = webdriver.Firefox(firefox_profile = fp, firefox_binary = fb)
print "Sleeping %i seconds to give the list time to download" % wait_time
time.sleep(wait_time)
if not os.path.isdir(output_directory):
print "Output directory %s does not exist, creating." % output_directory
os.makedirs(output_directory)
print "Copying blocklists to %s" % output_directory
try:
shutil.copy(browser_path+'adblockplus/patterns.ini', output_directory)
shutil.copy(browser_path+'adblockplus/elemhide.css', output_directory)
finally:
driver.close()
display.stop()
| gpl-3.0 | -3,232,909,277,713,884,000 | 37.756579 | 111 | 0.634357 | false |
phiros/nepi | src/nepi/resources/ns3/ns3propagationdelaymodel.py | 1 | 1830 | #
# NEPI, a framework to manage network experiments
# Copyright (C) 2014 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
from nepi.execution.resource import clsinit_copy
from nepi.resources.ns3.ns3base import NS3Base
@clsinit_copy
class NS3BasePropagationDelayModel(NS3Base):
_rtype = "abstract::ns3::PropagationDelayModel"
@property
def simulation(self):
return self.channel.simulation
@property
def channel(self):
from nepi.resources.ns3.ns3wifichannel import NS3BaseWifiChannel
channels = self.get_connected(NS3BaseWifiChannel.get_rtype())
if not channels:
msg = "PropagationDelayModel not connected to channel"
self.error(msg)
raise RuntimeError, msg
return channels[0]
@property
def _rms_to_wait(self):
others = set()
others.add(self.channel)
return others
def _connect_object(self):
channel = self.channel
if channel.uuid not in self.connected:
self.simulation.invoke(channel.uuid, "SetPropagationDelayModel", self.uuid)
self._connected.add(channel.uuid)
| gpl-3.0 | -5,356,266,568,432,127,000 | 32.888889 | 87 | 0.693443 | false |
tlambert03/grouper | test.py | 1 | 1056 | import grouper as G
from multiprocessing import Pool
import sys
cores = 8 if len(sys.argv) < 2 else int(float(sys.argv[1]))
iterations_per_thread = 1000 if len(sys.argv) < 3 else int(float(sys.argv[2]))
n = G.params.numrotations
stations = G.params.stations
if __name__ == '__main__':
p = Pool(processes=cores)
results = p.map(G.parallel_shuffle, [(stations,n,iterations_per_thread,iterations_per_thread/10)]*cores)
print
print
print "--------------"
print 'BEST RESULT:'
print
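    # Keep the solution whose combined score (partition score * rotation score) is lowest.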
bench = float("inf")
for sol in results:
s = G.scoreSolution(sol)
gs = sum([g[0] for g in s])
rs = sum([g[1] for g in s])
cs = gs * rs
if cs < bench:
bench = cs
best = sol
print best.printSol()
s = G.scoreSolution(best)
gs = sum([g[0] for g in s])
rs = sum([g[1] for g in s])
cs = gs * rs
    print best.part
print "Partition Score: ", gs
    print best.schedule
print "Rotation Score: ", rs
print "Combo Score: ", cs
print "--------------" | mit | -1,905,808,509,131,380,000 | 26.102564 | 108 | 0.569129 | false |
tschalch/pyTray | src/gui/error_window.py | 1 | 2153 | #!/usr/bin/env python2.3
"""
Error window that pops up and displays unhandled errors
"""
from wx.lib.dialogs import *
import wx
import sys, traceback
class ErrorDialog(wx.Dialog):
def __init__(self, parent, msg, caption,
pos=wx.DefaultPosition, size=(500,300),
style=wx.DEFAULT_DIALOG_STYLE):
wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
x, y = pos
if x == -1 and y == -1:
self.CenterOnScreen(wx.BOTH)
self.text = wx.TextCtrl(self, -1, msg,
style=wx.TE_MULTILINE | wx.TE_READONLY)
okID = wx.NewId()
ok = wx.Button(self, okID, "OK")
self.Bind(wx.EVT_BUTTON, self.OnButton, ok)
self.Bind(wx.EVT_CLOSE, self.OnButton)
ok.SetDefault()
lc = layoutf.Layoutf('t=t5#1;b=t5#2;l=l5#1;r=r5#1', (self,ok))
self.text.SetConstraints(lc)
lc = layoutf.Layoutf('b=b5#1;x%w50#1;w!80;h*', (self,))
ok.SetConstraints(lc)
self.SetAutoLayout(1)
self.Layout()
def OnButton(self, event):
self.Destroy()
def write(self, msg):
self.text.AppendText(msg)
class ErrorHandler:
def __init__(self):
self.dialog = None
def write(self, msg):
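        # Create the dialog lazily on the first error and recreate it if the user
        # closed it; fall back to stdout/stderr if the GUI itself fails.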
try:
if not self.dialog:
self.dialog = ErrorDialog(None, "Ooops, this looks like bug! Please send the error message to [email protected]\n\n", "ErrorWindow")
self.dialog.Show()
if not self.dialog.IsShown():
self.dialog = ErrorDialog(None, "Error:", "ErrorWindow")
self.dialog.Show()
self.dialog.write(msg)
except:
sys.stderr = sys.__stderr__
            traceback.print_exc(file=sys.stdout)
raise
class GuiApp(wx.App):
def OnInit(self):
return True
if __name__ == "__main__":
app = GuiApp(0)
app.MainLoop()
hdl = ErrorHandler()
hdl.write("Test") | bsd-3-clause | 2,089,554,167,221,337,300 | 29.691176 | 155 | 0.51974 | false |
Godley/MuseParse | MuseParse/tests/testLilyMethods/testPart.py | 1 | 4033 | from MuseParse.classes.ObjectHierarchy.ItemClasses import Note
from MuseParse.classes.ObjectHierarchy.TreeClasses.PartNode import PartNode
from MuseParse.tests.testLilyMethods.lily import Lily
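# Each test builds a small PartNode hierarchy and records the LilyPond strings it is expected to render to.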
class testPartMeasureWithNote(Lily):
def setUp(self):
self.item = PartNode()
self.item.addEmptyMeasure(1, 1)
measure = self.item.getMeasure(1, 1)
note = Note.Note()
note.pitch = Note.Pitch()
measure.addNote(note)
self.lilystring = [
"zerostaffone = \\new Staff{ % measure 1\nc' | \n\n }\n\n",
'\\zerostaffone']
class testPartMultistafftavesWithName(Lily):
def setUp(self):
self.item = PartNode()
self.item.GetItem().name = "Piano"
self.item.addEmptyMeasure(1, 1)
measure = self.item.getMeasure(1, 1)
note = Note.Note()
note.pitch = Note.Pitch()
measure.addNote(note)
self.item.addEmptyMeasure(1, 2)
measure2 = self.item.getMeasure(1, 2)
note2 = Note.Note()
note2.pitch = Note.Pitch()
measure2.addNote(note2)
self.lilystring = [
"zerostaffone = \\new Staff{ % measure 1\nc' | \n\n }\n\nzerostafftwo = \\new Staff{ % measure 1\nc' | \n\n }\n\n",
"\\new StaffGroup \\with {\ninstrumentName = \markup { \n\r \column { \n\r\r \line { \"Piano\" \n\r\r } \n\r } \n } \n }<<\zerostaffone\n\zerostafftwo>>"]
class testPartMultistafftaves(Lily):
def setUp(self):
self.item = PartNode()
self.item.addEmptyMeasure(1, 1)
self.item.addEmptyMeasure(1, 2)
measure1 = self.item.getMeasure(1, 1)
measure2 = self.item.getMeasure(1, 2)
note1 = Note.Note()
note1.pitch = Note.Pitch()
note2 = Note.Note()
note2.pitch = Note.Pitch()
measure1.addNote(note1)
measure2.addNote(note2)
self.lilystring = [
"zerostaffone = \\new Staff{ % measure 1\nc' | \n\n }\n\nzerostafftwo = \\new Staff{ % measure 1\nc' | \n\n }\n\n",
"\\new StaffGroup <<\zerostaffone\n\zerostafftwo>>"]
class testPartMultiBars(Lily):
def setUp(self):
self.item = PartNode()
self.item.addEmptyMeasure(1, 1)
self.item.addEmptyMeasure(2, 1)
measure2 = self.item.getMeasure(2, 1)
measure = self.item.getMeasure(1, 1)
note = Note.Note()
note.pitch = Note.Pitch()
note2 = Note.Note()
note2.pitch = Note.Pitch()
measure.addNote(note)
measure2.addNote(note2)
self.lilystring = [
"zerostaffone = \\new Staff{ % measure 1\nc' | \n\n % measure 2\nc' | \n\n }\n\n",
"\\zerostaffone"]
class testPartMultiBarsstafftaves(Lily):
def setUp(self):
self.item = PartNode()
self.item.addEmptyMeasure(1, 1)
measure = self.item.getMeasure(1, 1)
note = Note.Note()
note.pitch = Note.Pitch()
measure.addNote(note)
self.item.addEmptyMeasure(1, 2)
measure2 = self.item.getMeasure(1, 2)
note2 = Note.Note()
note2.pitch = Note.Pitch()
measure2.addNote(note2)
self.item.addEmptyMeasure(2, 1)
measure3 = self.item.getMeasure(2, 1)
note3 = Note.Note()
note3.pitch = Note.Pitch()
measure3.addNote(note3)
self.lilystring = [
"zerostaffone = \\new Staff{ % measure 1\nc' | \n\n % measure 2\nc' | \n\n }\n\nzerostafftwo = \\new Staff{ % measure 1\nc' | \n\n }\n\n",
"\\new StaffGroup <<\\zerostaffone\n\\zerostafftwo>>"]
class testPartWithName(Lily):
def setUp(self):
self.item = PartNode()
self.item.addEmptyMeasure(1, 1)
self.item.GetItem().name = "charlotte"
self.lilystring = [
"zerostaffone = \\new Staff \with {\ninstrumentName = \\markup { \n\r \\column { \n\r\r \\line { \"charlotte\" \n\r\r } \n\r } \n } \n }{ % measure 1\n | \n\n }\n\n",
"\zerostaffone"]
def tearDown(self):
self.item = None
| mit | 7,874,060,913,794,427,000 | 34.690265 | 178 | 0.578973 | false |
mgrygoriev/CloudFerry | cloudferrylib/os/discovery/stages.py | 1 | 2752 | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from oslo_utils import importutils
from cloudferrylib import stage
from cloudferrylib.os.discovery import model
LOG = logging.getLogger(__name__)
class DiscoverStage(stage.Stage):
def __init__(self):
super(DiscoverStage, self).__init__()
self.missing_clouds = None
def invalidate(self, old_signature, new_signature, force=False):
"""
Remove data related to any cloud that changed signature.
"""
if force:
with model.Session() as session:
session.delete()
return
self.missing_clouds = []
        # Create set of cloud names whose data is no longer valid
old_clouds = set(old_signature.keys())
invalid_clouds = old_clouds.difference(new_signature.keys())
for name, signature in new_signature.items():
if name not in old_signature:
self.missing_clouds.append(name)
continue
if old_signature[name] != signature:
self.missing_clouds.append(name)
invalid_clouds.add(name)
with model.Session() as session:
for cloud in invalid_clouds:
session.delete(cloud=cloud)
def signature(self, config):
"""
Discovery signature is based on configuration. Each configured cloud
have it's own signature.
"""
return {n: [c.credential.auth_url, c.credential.region_name]
for n, c in config.clouds.items()}
def execute(self, config):
"""
Execute discovery.
"""
if self.missing_clouds is None:
self.missing_clouds = config.clouds.keys()
for cloud_name in self.missing_clouds:
cloud = config.clouds[cloud_name]
for class_name in cloud.discover:
cls = importutils.import_class(class_name)
LOG.info('Starting discover %s objects in %s cloud',
cls.__name__, cloud_name)
cls.discover(cloud)
LOG.info('Done discovering %s objects in %s cloud',
cls.__name__, cloud_name)
| apache-2.0 | -3,448,828,013,168,188,400 | 34.282051 | 76 | 0.616642 | false |
daatrujillopu/Sfotipy | actions.py | 1 | 1933 | __author__ = 'danny'
import csv
import logging
import tablib
from datetime import datetime
from django.db.models import Model
from django.db.models.fields.files import FieldFile
from unicodedata import normalize
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.template import Context, Template
from django.conf import settings
from django.core.urlresolvers import reverse
def export_as_excel(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
opts = modeladmin.model._meta
    response = HttpResponse(content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename=%s.xls' % str(opts).replace('.', '_')
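    # Prefer an explicit get_csv_fields() hook on the model; otherwise export every
    # model field, using its verbose_name for the header row.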
try:
field_names = modeladmin.model.get_csv_fields()
v_field_names = field_names
except:
field_names = [field.name for field in opts.fields]
v_field_names = [getattr(field, 'verbose_name') or field.name for field in opts.fields]
v_field_names = map(lambda x: x if x != 'ID' else 'Id', v_field_names)
ax = []
headers = v_field_names
data = []
data = tablib.Dataset(*data, headers=headers)
for obj in queryset:
acc = []
for field in field_names:
try:
uf = getattr(obj, field)()
except TypeError:
try:
uf = getattr(obj, field)
except:
                    uf = ' error getting the data'
if uf is None:
uf = ''
elif isinstance(uf, datetime):
uf = str(uf)
elif isinstance(uf, Model):
uf = str(uf)
elif isinstance(uf, FieldFile):
uf = str(uf.url)
acc.append(uf)
data.append(acc)
response.write(data.xls)
return response
export_as_excel.short_description = "Export as Excel"
stefan-jonasson/home-assistant | homeassistant/components/zwave/node_entity.py | 1 | 8320 | """Entity class that represents Z-Wave node."""
import logging
from homeassistant.core import callback
from homeassistant.const import ATTR_BATTERY_LEVEL, ATTR_WAKEUP, ATTR_ENTITY_ID
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from .const import (
ATTR_NODE_ID, COMMAND_CLASS_WAKE_UP, ATTR_SCENE_ID, ATTR_SCENE_DATA,
ATTR_BASIC_LEVEL, EVENT_NODE_EVENT, EVENT_SCENE_ACTIVATED, DOMAIN,
COMMAND_CLASS_CENTRAL_SCENE)
from .util import node_name
_LOGGER = logging.getLogger(__name__)
ATTR_QUERY_STAGE = 'query_stage'
ATTR_AWAKE = 'is_awake'
ATTR_READY = 'is_ready'
ATTR_FAILED = 'is_failed'
ATTR_PRODUCT_NAME = 'product_name'
ATTR_MANUFACTURER_NAME = 'manufacturer_name'
ATTR_NODE_NAME = 'node_name'
STAGE_COMPLETE = 'Complete'
_REQUIRED_ATTRIBUTES = [
ATTR_QUERY_STAGE, ATTR_AWAKE, ATTR_READY, ATTR_FAILED,
'is_info_received', 'max_baud_rate', 'is_zwave_plus']
_OPTIONAL_ATTRIBUTES = ['capabilities', 'neighbors', 'location']
_COMM_ATTRIBUTES = [
'sentCnt', 'sentFailed', 'retries', 'receivedCnt', 'receivedDups',
'receivedUnsolicited', 'sentTS', 'receivedTS', 'lastRequestRTT',
'averageRequestRTT', 'lastResponseRTT', 'averageResponseRTT']
ATTRIBUTES = _REQUIRED_ATTRIBUTES + _OPTIONAL_ATTRIBUTES
class ZWaveBaseEntity(Entity):
"""Base class for Z-Wave Node and Value entities."""
def __init__(self):
"""Initialize the base Z-Wave class."""
self._update_scheduled = False
self.old_entity_id = None
self.new_entity_id = None
def maybe_schedule_update(self):
"""Maybe schedule state update.
If value changed after device was created but before setup_platform
was called - skip updating state.
"""
if self.hass and not self._update_scheduled:
self.hass.add_job(self._schedule_update)
@callback
def _schedule_update(self):
"""Schedule delayed update."""
if self._update_scheduled:
return
@callback
def do_update():
"""Really update."""
self.hass.async_add_job(self.async_update_ha_state)
self._update_scheduled = False
self._update_scheduled = True
self.hass.loop.call_later(0.1, do_update)
def sub_status(status, stage):
"""Format sub-status."""
return '{} ({})'.format(status, stage) if stage else status
class ZWaveNodeEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node."""
def __init__(self, node, network, new_entity_ids):
"""Initialize node."""
# pylint: disable=import-error
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self._network = network
self.node = node
self.node_id = self.node.node_id
self._name = node_name(self.node)
self._product_name = node.product_name
self._manufacturer_name = node.manufacturer_name
self.old_entity_id = "{}.{}_{}".format(
DOMAIN, slugify(self._name), self.node_id)
self.new_entity_id = "{}.{}".format(DOMAIN, slugify(self._name))
if not new_entity_ids:
self.entity_id = self.old_entity_id
self._attributes = {}
self.wakeup_interval = None
self.location = None
self.battery_level = None
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED)
dispatcher.connect(self.network_node_changed, ZWaveNetwork.SIGNAL_NODE)
dispatcher.connect(
self.network_node_changed, ZWaveNetwork.SIGNAL_NOTIFICATION)
dispatcher.connect(
self.network_node_event, ZWaveNetwork.SIGNAL_NODE_EVENT)
dispatcher.connect(
self.network_scene_activated, ZWaveNetwork.SIGNAL_SCENE_EVENT)
def network_node_changed(self, node=None, value=None, args=None):
"""Handle a changed node on the network."""
if node and node.node_id != self.node_id:
return
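        # Some signals identify the node only via a nodeId entry in 'args'.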
if args is not None and 'nodeId' in args and \
args['nodeId'] != self.node_id:
return
# Process central scene activation
if (value is not None and
value.command_class == COMMAND_CLASS_CENTRAL_SCENE):
self.central_scene_activated(value.index, value.data)
self.node_changed()
def get_node_statistics(self):
"""Retrieve statistics from the node."""
return self._network.manager.getNodeStatistics(
self._network.home_id, self.node_id)
def node_changed(self):
"""Update node properties."""
attributes = {}
stats = self.get_node_statistics()
for attr in ATTRIBUTES:
value = getattr(self.node, attr)
if attr in _REQUIRED_ATTRIBUTES or value:
attributes[attr] = value
for attr in _COMM_ATTRIBUTES:
attributes[attr] = stats[attr]
if self.node.can_wake_up():
for value in self.node.get_values(COMMAND_CLASS_WAKE_UP).values():
self.wakeup_interval = value.data
break
else:
self.wakeup_interval = None
self.battery_level = self.node.get_battery_level()
self._attributes = attributes
self.maybe_schedule_update()
def network_node_event(self, node, value):
"""Handle a node activated event on the network."""
if node.node_id == self.node.node_id:
self.node_event(value)
def node_event(self, value):
"""Handle a node activated event for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_NODE_EVENT, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_BASIC_LEVEL: value
})
def network_scene_activated(self, node, scene_id):
"""Handle a scene activated event on the network."""
if node.node_id == self.node.node_id:
self.scene_activated(scene_id)
def scene_activated(self, scene_id):
"""Handle an activated scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node.node_id,
ATTR_SCENE_ID: scene_id
})
def central_scene_activated(self, scene_id, scene_data):
"""Handle an activated central scene for this node."""
if self.hass is None:
return
self.hass.bus.fire(EVENT_SCENE_ACTIVATED, {
ATTR_ENTITY_ID: self.entity_id,
ATTR_NODE_ID: self.node_id,
ATTR_SCENE_ID: scene_id,
ATTR_SCENE_DATA: scene_data
})
@property
def state(self):
"""Return the state."""
if ATTR_READY not in self._attributes:
return None
stage = ''
if not self._attributes[ATTR_READY]:
# If node is not ready use stage as sub-status.
stage = self._attributes[ATTR_QUERY_STAGE]
if self._attributes[ATTR_FAILED]:
return sub_status('Dead', stage)
if not self._attributes[ATTR_AWAKE]:
return sub_status('Sleeping', stage)
if self._attributes[ATTR_READY]:
return sub_status('Ready', stage)
return stage
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_NODE_ID: self.node_id,
ATTR_NODE_NAME: self._name,
ATTR_MANUFACTURER_NAME: self._manufacturer_name,
ATTR_PRODUCT_NAME: self._product_name,
'old_entity_id': self.old_entity_id,
'new_entity_id': self.new_entity_id,
}
attrs.update(self._attributes)
if self.battery_level is not None:
attrs[ATTR_BATTERY_LEVEL] = self.battery_level
if self.wakeup_interval is not None:
attrs[ATTR_WAKEUP] = self.wakeup_interval
return attrs
| mit | 3,290,718,244,363,379,000 | 33.238683 | 79 | 0.608053 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/topological.py | 1 | 11300 | # topological.py
# Copyright (C) 2005, 2006, 2007 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms.
The topological sort is an algorithm that receives this list of
dependencies as a *partial ordering*, that is a list of pairs which
might say, *X is dependent on Y*, *Q is dependent on Z*, but does not
necessarily tell you anything about Q being dependent on X. Therefore,
it's not a straight sort where every element can be compared to
another... only some of the elements have any sorting preference, and
then only towards just some of the other elements. For a particular
partial ordering, there can be many possible sorts that satisfy the
conditions.
"""
from sqlalchemy import util
from sqlalchemy.exceptions import CircularDependencyError
class _Node(object):
"""Represent each item in the sort.
While the topological sort produces a straight ordered list of
items, ``_Node`` ultimately stores a tree-structure of those items
which are organized so that non-dependent nodes are siblings.
"""
def __init__(self, item):
self.item = item
self.dependencies = util.Set()
self.children = []
self.cycles = None
def __str__(self):
return self.safestr()
def safestr(self, indent=0):
return (' ' * indent * 2) + \
str(self.item) + \
(self.cycles is not None and (" (cycles: " + repr([x for x in self.cycles]) + ")") or "") + \
"\n" + \
''.join([n.safestr(indent + 1) for n in self.children])
def __repr__(self):
return "%s" % (str(self.item))
def all_deps(self):
"""Return a set of dependencies for this node and all its cycles."""
deps = util.Set(self.dependencies)
if self.cycles is not None:
for c in self.cycles:
deps.update(c.dependencies)
return deps
class _EdgeCollection(object):
"""A collection of directed edges."""
def __init__(self):
self.parent_to_children = {}
self.child_to_parents = {}
def add(self, edge):
"""Add an edge to this collection."""
(parentnode, childnode) = edge
if parentnode not in self.parent_to_children:
self.parent_to_children[parentnode] = util.Set()
self.parent_to_children[parentnode].add(childnode)
if childnode not in self.child_to_parents:
self.child_to_parents[childnode] = util.Set()
self.child_to_parents[childnode].add(parentnode)
parentnode.dependencies.add(childnode)
def remove(self, edge):
"""Remove an edge from this collection.
Return the childnode if it has no other parents.
"""
(parentnode, childnode) = edge
self.parent_to_children[parentnode].remove(childnode)
self.child_to_parents[childnode].remove(parentnode)
if len(self.child_to_parents[childnode]) == 0:
return childnode
else:
return None
def has_parents(self, node):
return node in self.child_to_parents and len(self.child_to_parents[node]) > 0
def edges_by_parent(self, node):
if node in self.parent_to_children:
return [(node, child) for child in self.parent_to_children[node]]
else:
return []
def get_parents(self):
return self.parent_to_children.keys()
def pop_node(self, node):
"""Remove all edges where the given node is a parent.
Return the collection of all nodes which were children of the
given node, and have no further parents.
"""
children = self.parent_to_children.pop(node, None)
if children is not None:
for child in children:
self.child_to_parents[child].remove(node)
if not self.child_to_parents[child]:
yield child
def __len__(self):
return sum([len(x) for x in self.parent_to_children.values()])
def __iter__(self):
for parent, children in self.parent_to_children.iteritems():
for child in children:
yield (parent, child)
def __str__(self):
return repr(list(self))
def __repr__(self):
return repr(list(self))
class QueueDependencySorter(object):
"""Topological sort adapted from wikipedia's article on the subject.
It creates a straight-line list of elements, then a second pass
batches non-dependent elements as siblings in a tree structure. Future
versions of this algorithm may separate the "convert to a tree"
step.
"""
def __init__(self, tuples, allitems):
self.tuples = tuples
self.allitems = allitems
def sort(self, allow_self_cycles=True, allow_all_cycles=False):
(tuples, allitems) = (self.tuples, self.allitems)
#print "\n---------------------------------\n"
#print repr([t for t in tuples])
#print repr([a for a in allitems])
#print "\n---------------------------------\n"
nodes = {}
edges = _EdgeCollection()
for item in allitems + [t[0] for t in tuples] + [t[1] for t in tuples]:
if item not in nodes:
node = _Node(item)
nodes[item] = node
for t in tuples:
if t[0] is t[1]:
if allow_self_cycles:
n = nodes[t[0]]
n.cycles = util.Set([n])
continue
else:
raise CircularDependencyError("Self-referential dependency detected " + repr(t))
childnode = nodes[t[1]]
parentnode = nodes[t[0]]
edges.add((parentnode, childnode))
queue = []
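        # Seed the queue with every node that has no incoming edges (no parents).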
for n in nodes.values():
if not edges.has_parents(n):
queue.append(n)
output = []
while nodes:
if not queue:
# edges remain but no edgeless nodes to remove; this indicates
# a cycle
if allow_all_cycles:
for cycle in self._find_cycles(edges):
lead = cycle[0][0]
lead.cycles = util.Set()
for edge in cycle:
n = edges.remove(edge)
lead.cycles.add(edge[0])
lead.cycles.add(edge[1])
if n is not None:
queue.append(n)
for n in lead.cycles:
if n is not lead:
n._cyclical = True
for (n,k) in list(edges.edges_by_parent(n)):
edges.add((lead, k))
edges.remove((n,k))
continue
else:
# long cycles not allowed
raise CircularDependencyError("Circular dependency detected " + repr(edges) + repr(queue))
node = queue.pop()
if not hasattr(node, '_cyclical'):
output.append(node)
del nodes[node.item]
for childnode in edges.pop_node(node):
queue.append(childnode)
return self._create_batched_tree(output)
def _create_batched_tree(self, nodes):
"""Given a list of nodes from a topological sort, organize the
nodes into a tree structure, with as many non-dependent nodes
set as siblings to each other as possible.
"""
if not nodes:
return None
# a list of all currently independent subtrees as a tuple of
# (root_node, set_of_all_tree_nodes, set_of_all_cycle_nodes_in_tree)
        # order of the list has no semantics for the algorithm
independents = []
# in reverse topological order
for node in util.reversed(nodes):
            # node's subtree and cycles contain the node itself
subtree = util.Set([node])
if node.cycles is not None:
cycles = util.Set(node.cycles)
else:
cycles = util.Set()
# get a set of dependent nodes of node and its cycles
nodealldeps = node.all_deps()
if nodealldeps:
# iterate over independent node indexes in reverse order so we can efficiently remove them
for index in xrange(len(independents)-1,-1,-1):
child, childsubtree, childcycles = independents[index]
# if there is a dependency between this node and an independent node
if (childsubtree.intersection(nodealldeps) or childcycles.intersection(node.dependencies)):
                    # prepend child to the node's children
                    # (append should be fine, but the previous implementation used prepend)
node.children[0:0] = (child,)
                    # merge the child's subtree and cycles
subtree.update(childsubtree)
cycles.update(childcycles)
# remove the child from list of independent subtrees
independents[index:index+1] = []
# add node as a new independent subtree
independents.append((node,subtree,cycles))
# choose an arbitrary node from list of all independent subtrees
head = independents.pop()[0]
        # add all other independent subtrees as children of the chosen root
# used prepend [0:0] instead of extend to maintain exact behaviour of previous implementation
head.children[0:0] = [i[0] for i in independents]
return head
def _find_cycles(self, edges):
involved_in_cycles = util.Set()
cycles = {}
def traverse(node, goal=None, cycle=None):
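            # Depth-first walk from each parent; getting back to the goal node means
            # the keys recorded in 'cycle' form a cycle, which is merged into 'cycles'.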
if goal is None:
goal = node
cycle = []
elif node is goal:
return True
for (n, key) in edges.edges_by_parent(node):
if key in cycle:
continue
cycle.append(key)
if traverse(key, goal, cycle):
cycset = util.Set(cycle)
for x in cycle:
involved_in_cycles.add(x)
if x in cycles:
existing_set = cycles[x]
[existing_set.add(y) for y in cycset]
for y in existing_set:
cycles[y] = existing_set
cycset = existing_set
else:
cycles[x] = cycset
cycle.pop()
for parent in edges.get_parents():
traverse(parent)
# sets are not hashable, so uniquify with id
unique_cycles = dict([(id(s), s) for s in cycles.values()]).values()
for cycle in unique_cycles:
edgecollection = [edge for edge in edges
if edge[0] in cycle and edge[1] in cycle]
yield edgecollection
| bsd-3-clause | 3,143,276,866,390,748,000 | 37.69863 | 111 | 0.549115 | false |
gugarosa/LibKeras | train.py | 1 | 3047 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import common
import gzip
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle as pkl
import sys
from data import load_data
from itertools import product
from os.path import join
from sklearn.model_selection import train_test_split
# Default directory
root_out = common.default_path() + '/outputs'
# Type of data entries
n_runs = 1
datasets = [{'name': 'brain', 'n_classes': 2}]
data_types = [{'type': 'matlab', 'ref': False}]
normalization_methods = ['none']
test_sizes = [0.2]
params = list(product(datasets, data_types, normalization_methods, test_sizes))
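# Every combination of dataset, data type, normalization method and test size is trained n_runs times.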
# Fixed parameters
learning_rate = 0.01
momentum = 0.9
decay = 0.0005
nesterov = True
batch_size = 32
n_epochs = 90
val_size = 0.1
metric = 'accuracy'
loss_func = 'categorical_crossentropy'
results = []
# Loop to hold all the desired configurations
for d, dt, nm, ts in params:
for i in range(n_runs):
data, labels = load_data(d['name'], dt['type'], dt['ref'], nm)
input_shape = data.shape[1:]
# Splitting data into training and test sets
data_train, data_test, lab_train, lab_test = train_test_split(data, labels, test_size=ts, random_state=i)
# Building CNN, note that you can choose the build function according to common.py
cnet = common.ConvNet()
cnet.build_samplenet(include_top=True, weights=None, input_shape=input_shape, classes=d['n_classes'])
# Compiling current network
cnet.compile(learning_rate=learning_rate, momentum=momentum, decay=decay, nesterov=nesterov, metric=metric, loss_func=loss_func)
# Training current network
cnet.train(data_train, lab_train, d['n_classes'], batch_size=batch_size, n_epochs=n_epochs, validation_size=val_size, loss_func=loss_func)
# Evaluating current network
acc = cnet.evaluate(data_test, lab_test, d['n_classes'], batch_size)
# Saving network model
mname = '%s_model.h5' % (d['name'])
cnet.save_model(join(root_out, 'models', mname))
# Saving trained network weights
wname = '%s_%s_%s_%.2f_%02i.h5' % (d['name'], dt['type'], nm, ts, i)
cnet.save_weight(join(root_out, 'weights', wname))
# Plotting the accuracy history
history = cnet.get_history()
fname = '%s_%s_%s_%.2f_%02i' % (d['name'], dt['type'], nm, ts, i)
plt.plot(history['acc'])
plt.plot(history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.savefig(join(root_out, 'history', 'history_' + fname + '.jpg'))
plt.close()
# Dumping history to a .pkl file
pkl.dump(history, gzip.open(join(root_out, 'history', 'history_' + fname + '.pkl'), 'wb'))
# Saving results output on a .csv file
results.append([d['name'], dt['type'], nm, ts, i, acc])
cnet = None
df = pd.DataFrame(results, columns=['dataset', 'data_type', 'normalization_method', 'test_size', 'running_num', 'acc'])
df.to_csv(join(root_out, 'results.csv'))
# End of current iteration
print("\n[INFO] Running #{:d} ok!".format(i))
import gc; gc.collect()
| gpl-3.0 | -7,683,564,740,032,855,000 | 32.119565 | 140 | 0.685921 | false |
juanyunis/django-resumes | resumes/admin.py | 1 | 1253 | from django.contrib import admin
from models import *
class UserResumeEducationInline(admin.StackedInline):
model = UserResumeEducation
extra = 1
class UserResumeLanguageInline(admin.StackedInline):
model = UserResumeLanguage
extra = 1
class UserResumeInterestInline(admin.StackedInline):
model = UserResumeInterest
extra = 1
class UserResumeQualificationInline(admin.StackedInline):
model = UserResumeQualification
extra = 1
class UserResumeJobInline(admin.StackedInline):
model = UserResumeJob
extra = 1
class UserResumeReferenceInline(admin.StackedInline):
model = UserResumeReferences
extra = 1
class UserResumeAdmin(admin.ModelAdmin):
inlines = [
UserResumeEducationInline, UserResumeLanguageInline,
UserResumeInterestInline, UserResumeQualificationInline,
UserResumeJobInline, UserResumeReferenceInline
]
admin.site.register(Company)
admin.site.register(School)
admin.site.register(UserResume, UserResumeAdmin)
admin.site.register(UserResumeEducation)
admin.site.register(UserResumeLanguage)
admin.site.register(UserResumeInterest)
admin.site.register(UserResumeQualification)
admin.site.register(UserResumeJob)
admin.site.register(UserResumeReferences) | mit | -1,410,418,413,467,217,700 | 24.591837 | 64 | 0.794094 | false |
rhd/meson | mesonbuild/backend/ninjabackend.py | 1 | 122266 | # Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import modules
from .. import environment, mesonlib
from .. import build
from .. import mlog
from .. import dependencies
from .. import compilers
from ..compilers import CompilerArgs
from ..mesonlib import File, MesonException, OrderedSet
from ..mesonlib import get_meson_script, get_compiler_for_source
from .backends import CleanTrees, InstallData
from ..build import InvalidArguments
import os, sys, pickle, re
import subprocess, shutil
from collections import OrderedDict
if mesonlib.is_windows():
quote_char = '"'
execute_wrapper = 'cmd /c'
rmfile_prefix = 'del /f /s /q {} &&'
else:
quote_char = "'"
execute_wrapper = ''
rmfile_prefix = 'rm -f {} &&'
def ninja_quote(text):
for char in ('$', ' ', ':'):
text = text.replace(char, '$' + char)
if '\n' in text:
raise MesonException('Ninja does not support newlines in rules. '
'Please report this error with a test case to the Meson bug tracker.')
return text
class NinjaBuildElement:
def __init__(self, all_outputs, outfilenames, rule, infilenames):
if isinstance(outfilenames, str):
self.outfilenames = [outfilenames]
else:
self.outfilenames = outfilenames
assert(isinstance(rule, str))
self.rule = rule
if isinstance(infilenames, str):
self.infilenames = [infilenames]
else:
self.infilenames = infilenames
self.deps = []
self.orderdeps = []
self.elems = []
self.all_outputs = all_outputs
def add_dep(self, dep):
if isinstance(dep, list):
self.deps += dep
else:
self.deps.append(dep)
def add_orderdep(self, dep):
if isinstance(dep, list):
self.orderdeps += dep
else:
self.orderdeps.append(dep)
def add_item(self, name, elems):
if isinstance(elems, str):
elems = [elems]
self.elems.append((name, elems))
def write(self, outfile):
self.check_outputs()
line = 'build %s: %s %s' % (
' '.join([ninja_quote(i) for i in self.outfilenames]),
self.rule,
' '.join([ninja_quote(i) for i in self.infilenames]))
if len(self.deps) > 0:
line += ' | ' + ' '.join([ninja_quote(x) for x in self.deps])
if len(self.orderdeps) > 0:
line += ' || ' + ' '.join([ninja_quote(x) for x in self.orderdeps])
line += '\n'
# This is the only way I could find to make this work on all
# platforms including Windows command shell. Slash is a dir separator
# on Windows, too, so all characters are unambiguous and, more importantly,
# do not require quoting.
line = line.replace('\\', '/')
outfile.write(line)
# All the entries that should remain unquoted
raw_names = {'DEPFILE', 'DESC', 'pool', 'description'}
for e in self.elems:
(name, elems) = e
should_quote = name not in raw_names
line = ' %s = ' % name
q_templ = quote_char + "%s" + quote_char
noq_templ = "%s"
newelems = []
for i in elems:
if not should_quote or i == '&&': # Hackety hack hack
templ = noq_templ
else:
templ = q_templ
i = i.replace('\\', '\\\\')
if quote_char == '"':
i = i.replace('"', '\\"')
newelems.append(templ % ninja_quote(i))
line += ' '.join(newelems)
line += '\n'
outfile.write(line)
outfile.write('\n')
def check_outputs(self):
for n in self.outfilenames:
if n in self.all_outputs:
raise MesonException('Multiple producers for Ninja target "%s". Please rename your targets.' % n)
self.all_outputs[n] = True
class NinjaBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.name = 'ninja'
self.ninja_filename = 'build.ninja'
self.target_arg_cache = {}
self.fortran_deps = {}
self.all_outputs = {}
def detect_vs_dep_prefix(self, tempfilename):
'''VS writes its dependency in a locale dependent format.
Detect the search prefix to use.'''
# Of course there is another program called 'cl' on
# some platforms. Let's just require that on Windows
# cl points to msvc.
if not mesonlib.is_windows() or shutil.which('cl') is None:
return open(tempfilename, 'a')
filename = os.path.join(self.environment.get_scratch_dir(),
'incdetect.c')
with open(filename, 'w') as f:
f.write('''#include<stdio.h>
int dummy;
''')
# The output of cl dependency information is language
# and locale dependent. Any attempt at converting it to
# Python strings leads to failure. We _must_ do this detection
# in raw byte mode and write the result in raw bytes.
pc = subprocess.Popen(['cl', '/showIncludes', '/c', 'incdetect.c'],
cwd=self.environment.get_scratch_dir(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdo, _) = pc.communicate()
# We want to match 'Note: including file: ' in the line
# 'Note: including file: d:\MyDir\include\stdio.h', however
# different locales have different messages with a different
        # number of colons. Match up to the drive name 'd:\'.
matchre = re.compile(rb"^(.*\s)[a-zA-Z]:\\.*stdio.h$")
for line in stdo.split(b'\r\n'):
match = matchre.match(line)
if match:
with open(tempfilename, 'ab') as binfile:
binfile.write(b'msvc_deps_prefix = ' + match.group(1) + b'\n')
return open(tempfilename, 'a')
raise MesonException('Could not determine vs dep dependency prefix string.')
def generate(self, interp):
self.interpreter = interp
outfilename = os.path.join(self.environment.get_build_dir(), self.ninja_filename)
tempfilename = outfilename + '~'
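        # Build under a temporary name; the existing build.ninja is replaced only
        # after generation finishes successfully (see os.replace below).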
with open(tempfilename, 'w') as outfile:
outfile.write('# This is the build file for project "%s"\n' %
self.build.get_project())
outfile.write('# It is autogenerated by the Meson build system.\n')
outfile.write('# Do not edit by hand.\n\n')
outfile.write('ninja_required_version = 1.5.1\n\n')
with self.detect_vs_dep_prefix(tempfilename) as outfile:
self.generate_rules(outfile)
self.generate_phony(outfile)
outfile.write('# Build rules for targets\n\n')
for t in self.build.get_targets().values():
self.generate_target(t, outfile)
outfile.write('# Test rules\n\n')
self.generate_tests(outfile)
outfile.write('# Install rules\n\n')
self.generate_install(outfile)
self.generate_dist(outfile)
if 'b_coverage' in self.environment.coredata.base_options and \
self.environment.coredata.base_options['b_coverage'].value:
outfile.write('# Coverage rules\n\n')
self.generate_coverage_rules(outfile)
outfile.write('# Suffix\n\n')
self.generate_utils(outfile)
self.generate_ending(outfile)
        # Only overwrite the old build file after the new one has been
# fully created.
os.replace(tempfilename, outfilename)
self.generate_compdb()
# http://clang.llvm.org/docs/JSONCompilationDatabase.html
def generate_compdb(self):
ninja_exe = environment.detect_ninja()
native_compilers = ['%s_COMPILER' % i for i in self.build.compilers]
cross_compilers = ['%s_CROSS_COMPILER' % i for i in self.build.cross_compilers]
ninja_compdb = [ninja_exe, '-t', 'compdb'] + native_compilers + cross_compilers
builddir = self.environment.get_build_dir()
try:
jsondb = subprocess.check_output(ninja_compdb, cwd=builddir)
with open(os.path.join(builddir, 'compile_commands.json'), 'wb') as f:
f.write(jsondb)
except Exception:
mlog.warning('Could not create compilation database.')
# Get all generated headers. Any source file might need them so
# we need to add an order dependency to them.
def get_generated_headers(self, target):
header_deps = []
# XXX: Why don't we add deps to CustomTarget headers here?
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue
for src in genlist.get_outputs():
if self.environment.is_header(src):
header_deps.append(self.get_target_generated_dir(target, genlist, src))
if 'vala' in target.compilers and not isinstance(target, build.Executable):
vala_header = File.from_built_file(self.get_target_dir(target), target.vala_header)
header_deps.append(vala_header)
# Recurse and find generated headers
for dep in target.link_targets:
if isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
header_deps += self.get_generated_headers(dep)
return header_deps
def get_target_generated_sources(self, target):
"""
Returns a dictionary with the keys being the path to the file
(relative to the build directory) of that type and the value
being the GeneratorList or CustomTarget that generated it.
"""
srcs = OrderedDict()
for gensrc in target.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(target, gensrc, s)
srcs[f] = s
return srcs
def get_target_sources(self, target):
srcs = OrderedDict()
for s in target.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
raise InvalidArguments('All sources in target {!r} must be of type mesonlib.File'.format(s))
f = s.rel_to_builddir(self.build_to_src)
srcs[f] = s
return srcs
# Languages that can mix with C or C++ but don't support unity builds yet
# because the syntax we use for unity builds is specific to C/++/ObjC/++.
# Assembly files cannot be unitified and neither can LLVM IR files
langs_cant_unity = ('d', 'fortran')
def get_target_source_can_unity(self, target, source):
if isinstance(source, File):
source = source.fname
if self.environment.is_llvm_ir(source) or \
self.environment.is_assembly(source):
return False
suffix = os.path.splitext(source)[1][1:]
for lang in self.langs_cant_unity:
if lang not in target.compilers:
continue
if suffix in target.compilers[lang].file_suffixes:
return False
return True
def generate_target(self, target, outfile):
if isinstance(target, build.CustomTarget):
self.generate_custom_target(target, outfile)
if isinstance(target, build.RunTarget):
self.generate_run_target(target, outfile)
name = target.get_id()
if name in self.processed_targets:
return
self.processed_targets[name] = True
# Generate rules for all dependency targets
self.process_target_dependencies(target, outfile)
# If target uses a language that cannot link to C objects,
# just generate for that language and return.
if isinstance(target, build.Jar):
self.generate_jar_target(target, outfile)
return
if 'rust' in target.compilers:
self.generate_rust_target(target, outfile)
return
if 'cs' in target.compilers:
self.generate_cs_target(target, outfile)
return
if 'swift' in target.compilers:
self.generate_swift_target(target, outfile)
return
# Now we handle the following languages:
# ObjC++, ObjC, C++, C, D, Fortran, Vala
# target_sources:
# Pre-existing target C/C++ sources to be built; dict of full path to
# source relative to build root and the original File object.
# generated_sources:
# GeneratedList and CustomTarget sources to be built; dict of the full
# path to source relative to build root and the generating target/list
# vala_generated_sources:
# Array of sources generated by valac that have to be compiled
if 'vala' in target.compilers:
# Sources consumed by valac are filtered out. These only contain
# C/C++ sources, objects, generated libs, and unknown sources now.
target_sources, generated_sources, \
vala_generated_sources = self.generate_vala_compile(target, outfile)
else:
target_sources = self.get_target_sources(target)
generated_sources = self.get_target_generated_sources(target)
vala_generated_sources = []
self.scan_fortran_module_outputs(target)
# Generate rules for GeneratedLists
self.generate_generator_list_rules(target, outfile)
# Generate rules for building the remaining source files in this target
outname = self.get_target_filename(target)
obj_list = []
use_pch = self.environment.coredata.base_options.get('b_pch', False)
is_unity = self.is_unity(target)
if use_pch and target.has_pch():
pch_objects = self.generate_pch(target, outfile)
else:
pch_objects = []
header_deps = []
unity_src = []
unity_deps = [] # Generated sources that must be built before compiling a Unity target.
header_deps += self.get_generated_headers(target)
if is_unity:
# Warn about incompatible sources if a unity build is enabled
langs = set(target.compilers.keys())
langs_cant = langs.intersection(self.langs_cant_unity)
if langs_cant:
                langs = ', '.join(langs_cant).upper()
                langs_are = langs + (' are' if len(langs_cant) > 1 else ' is')
msg = '{} not supported in Unity builds yet, so {} ' \
'sources in the {!r} target will be compiled normally' \
''.format(langs_are, langs, target.name)
mlog.log(mlog.red('FIXME'), msg)
# Get a list of all generated headers that will be needed while building
# this target's sources (generated sources and pre-existing sources).
# This will be set as dependencies of all the target's sources. At the
# same time, also deal with generated sources that need to be compiled.
generated_source_files = []
for rel_src, gensrc in generated_sources.items():
dirpart, fnamepart = os.path.split(rel_src)
raw_src = File(True, dirpart, fnamepart)
if self.environment.is_source(rel_src) and not self.environment.is_header(rel_src):
if is_unity and self.get_target_source_can_unity(target, rel_src):
unity_deps.append(raw_src)
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
unity_src.append(abs_src)
else:
generated_source_files.append(raw_src)
elif self.environment.is_object(rel_src):
obj_list.append(rel_src)
elif self.environment.is_library(rel_src):
pass
else:
# Assume anything not specifically a source file is a header. This is because
# people generate files with weird suffixes (.inc, .fh) that they then include
# in their source files.
header_deps.append(raw_src)
# These are the generated source files that need to be built for use by
# this target. We create the Ninja build file elements for this here
# because we need `header_deps` to be fully generated in the above loop.
for src in generated_source_files:
if self.environment.is_llvm_ir(src):
o = self.generate_llvm_ir_compile(target, outfile, src)
else:
o = self.generate_single_compile(target, outfile, src, True,
header_deps=header_deps)
obj_list.append(o)
# Generate compilation targets for C sources generated from Vala
# sources. This can be extended to other $LANG->C compilers later if
# necessary. This needs to be separate for at least Vala
vala_generated_source_files = []
for src in vala_generated_sources:
dirpart, fnamepart = os.path.split(src)
raw_src = File(True, dirpart, fnamepart)
if is_unity:
unity_src.append(os.path.join(self.environment.get_build_dir(), src))
header_deps.append(raw_src)
else:
                # Generated targets are ordered deps because they must exist
# before the sources compiling them are used. After the first
# compile we get precise dependency info from dep files.
# This should work in all cases. If it does not, then just
# move them from orderdeps to proper deps.
if self.environment.is_header(src):
header_deps.append(raw_src)
else:
# We gather all these and generate compile rules below
# after `header_deps` (above) is fully generated
vala_generated_source_files.append(raw_src)
for src in vala_generated_source_files:
# Passing 'vala' here signifies that we want the compile
# arguments to be specialized for C code generated by
# valac. For instance, no warnings should be emitted.
obj_list.append(self.generate_single_compile(target, outfile, src, 'vala', [], header_deps))
# Generate compile targets for all the pre-existing sources for this target
for f, src in target_sources.items():
if not self.environment.is_header(src):
if self.environment.is_llvm_ir(src):
obj_list.append(self.generate_llvm_ir_compile(target, outfile, src))
elif is_unity and self.get_target_source_can_unity(target, src):
abs_src = os.path.join(self.environment.get_build_dir(),
src.rel_to_builddir(self.build_to_src))
unity_src.append(abs_src)
else:
obj_list.append(self.generate_single_compile(target, outfile, src, False, [], header_deps))
obj_list += self.flatten_object_list(target)
if is_unity:
for src in self.generate_unity_files(target, unity_src):
obj_list.append(self.generate_single_compile(target, outfile, src, True, unity_deps + header_deps))
linker = self.determine_linker(target)
elem = self.generate_link(target, outfile, outname, obj_list, linker, pch_objects)
self.generate_shlib_aliases(target, self.get_target_dir(target))
elem.write(outfile)
def process_target_dependencies(self, target, outfile):
for t in target.get_dependencies():
tname = t.get_basename() + t.type_suffix()
if tname not in self.processed_targets:
self.generate_target(t, outfile)
def custom_target_generator_inputs(self, target, outfile):
for s in target.sources:
if hasattr(s, 'held_object'):
s = s.held_object
if isinstance(s, build.GeneratedList):
self.generate_genlist_for_target(s, target, outfile)
def unwrap_dep_list(self, target):
deps = []
for i in target.get_dependencies():
# FIXME, should not grab element at zero but rather expand all.
if isinstance(i, list):
i = i[0]
# Add a dependency on all the outputs of this target
for output in i.get_outputs():
deps.append(os.path.join(self.get_target_dir(i), output))
return deps
def generate_custom_target(self, target, outfile):
self.custom_target_generator_inputs(target, outfile)
(srcs, ofilenames, cmd) = self.eval_custom_target_command(target)
deps = self.unwrap_dep_list(target)
deps += self.get_custom_target_depend_files(target)
desc = 'Generating {0} with a {1} command.'
if target.build_always:
deps.append('PHONY')
if target.depfile is None:
rulename = 'CUSTOM_COMMAND'
else:
rulename = 'CUSTOM_COMMAND_DEP'
elem = NinjaBuildElement(self.all_outputs, ofilenames, rulename, srcs)
elem.add_dep(deps)
for d in target.extra_depends:
# Add a dependency on all the outputs of this target
for output in d.get_outputs():
elem.add_dep(os.path.join(self.get_target_dir(d), output))
# If the target requires capturing stdout, then use the serialized
# executable wrapper to capture that output and save it to a file.
#
# If the command line requires a newline, also use the wrapper, as
# ninja does not support them in its build rule syntax.
#
# Windows doesn't have -rpath, so for EXEs that need DLLs built within
# the project, we need to set PATH so the DLLs are found. We use
# a serialized executable wrapper for that and check if the
# CustomTarget command needs extra paths first.
if (target.capture or any('\n' in c for c in cmd) or
((mesonlib.is_windows() or mesonlib.is_cygwin()) and
self.determine_windows_extra_paths(target.command[0]))):
exe_data = self.serialize_executable(target.command[0], cmd[1:],
# All targets are built from the build dir
self.environment.get_build_dir(),
capture=ofilenames[0] if target.capture else None)
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'exe', exe_data]
cmd_type = 'meson_exe.py custom'
else:
cmd_type = 'custom'
if target.depfile is not None:
rel_dfile = os.path.join(self.get_target_dir(target), target.depfile)
abs_pdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(target))
os.makedirs(abs_pdir, exist_ok=True)
elem.add_item('DEPFILE', rel_dfile)
elem.add_item('COMMAND', cmd)
elem.add_item('description', desc.format(target.name, cmd_type))
elem.write(outfile)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_run_target(self, target, outfile):
cmd = [sys.executable, self.environment.get_build_command(), '--internal', 'commandrunner']
deps = self.unwrap_dep_list(target)
arg_strings = []
for i in target.args:
if isinstance(i, str):
arg_strings.append(i)
elif isinstance(i, (build.BuildTarget, build.CustomTarget)):
relfname = self.get_target_filename(i)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
deps.append(relfname)
elif isinstance(i, mesonlib.File):
relfname = i.rel_to_builddir(self.build_to_src)
arg_strings.append(os.path.join(self.environment.get_build_dir(), relfname))
else:
raise AssertionError('Unreachable code in generate_run_target: ' + str(i))
elem = NinjaBuildElement(self.all_outputs, target.name, 'CUSTOM_COMMAND', [])
cmd += [self.environment.get_source_dir(),
self.environment.get_build_dir(),
target.subdir,
get_meson_script(self.environment, 'mesonintrospect')]
texe = target.command
try:
texe = texe.held_object
except AttributeError:
pass
if isinstance(texe, build.Executable):
abs_exe = os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))
deps.append(self.get_target_filename(texe))
if self.environment.is_cross_build() and \
self.environment.cross_info.need_exe_wrapper():
exe_wrap = self.environment.cross_info.config['binaries'].get('exe_wrapper', None)
if exe_wrap is not None:
cmd += [exe_wrap]
cmd.append(abs_exe)
elif isinstance(texe, dependencies.ExternalProgram):
cmd += texe.get_command()
elif isinstance(texe, build.CustomTarget):
deps.append(self.get_target_filename(texe))
cmd += [os.path.join(self.environment.get_build_dir(), self.get_target_filename(texe))]
else:
cmd.append(target.command)
cmd += arg_strings
elem.add_dep(deps)
elem.add_item('COMMAND', cmd)
elem.add_item('description', 'Running external command %s.' % target.name)
elem.add_item('pool', 'console')
elem.write(outfile)
self.processed_targets[target.name + target.type_suffix()] = True
def generate_coverage_rules(self, outfile):
e = NinjaBuildElement(self.all_outputs, 'coverage', 'CUSTOM_COMMAND', 'PHONY')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'coverage',
self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_log_dir()])
e.add_item('description', 'Generates coverage reports.')
e.write(outfile)
self.generate_coverage_legacy_rules(outfile)
def generate_coverage_legacy_rules(self, outfile):
(gcovr_exe, lcov_exe, genhtml_exe) = environment.find_coverage_tools()
added_rule = False
if gcovr_exe:
added_rule = True
elem = NinjaBuildElement(self.all_outputs, 'coverage-xml', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-x', '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.xml')])
elem.add_item('DESC', 'Generating XML coverage report.')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, 'coverage-text', 'CUSTOM_COMMAND', '')
elem.add_item('COMMAND', [gcovr_exe, '-r', self.environment.get_source_dir(),
'-o', os.path.join(self.environment.get_log_dir(), 'coverage.txt')])
elem.add_item('DESC', 'Generating text coverage report.')
elem.write(outfile)
if lcov_exe and genhtml_exe:
added_rule = True
htmloutdir = os.path.join(self.environment.get_log_dir(), 'coveragereport')
covinfo = os.path.join(self.environment.get_log_dir(), 'coverage.info')
phony_elem = NinjaBuildElement(self.all_outputs, 'coverage-html', 'phony', os.path.join(htmloutdir, 'index.html'))
phony_elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, os.path.join(htmloutdir, 'index.html'), 'CUSTOM_COMMAND', '')
command = [lcov_exe, '--directory', self.environment.get_build_dir(),
'--capture', '--output-file', covinfo, '--no-checksum',
'&&', genhtml_exe, '--prefix', self.environment.get_build_dir(),
'--output-directory', htmloutdir, '--title', 'Code coverage',
'--legend', '--show-details', covinfo]
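            # The '&&' chaining relies on ninja invoking the command through
            # the shell on POSIX, so genhtml only runs if the lcov capture
            # succeeded.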
elem.add_item('COMMAND', command)
elem.add_item('DESC', 'Generating HTML coverage report.')
elem.write(outfile)
if not added_rule:
mlog.warning('coverage requested but neither gcovr nor lcov/genhtml found.')
def generate_install(self, outfile):
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
if self.environment.is_cross_build():
bins = self.environment.cross_info.config['binaries']
if 'strip' not in bins:
mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
strip_bin = None
else:
strip_bin = mesonlib.stringlistify(bins['strip'])
else:
strip_bin = self.environment.native_strip_bin
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(),
strip_bin,
get_meson_script(self.environment, 'mesonintrospect'))
elem = NinjaBuildElement(self.all_outputs, 'install', 'CUSTOM_COMMAND', 'PHONY')
elem.add_dep('all')
elem.add_item('DESC', 'Installing files.')
elem.add_item('COMMAND', [sys.executable, self.environment.get_build_command(), '--internal', 'install', install_data_file])
elem.add_item('pool', 'console')
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
elem.write(outfile)
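        # Serialize the collected install data; the '--internal install' runner
        # referenced in the COMMAND above unpickles this file and performs the
        # actual installation.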
with open(install_data_file, 'wb') as ofile:
pickle.dump(d, ofile)
def generate_target_install(self, d):
for t in self.build.get_targets().values():
if not t.should_install():
continue
# Find the installation directory.
outdirs = t.get_custom_install_dir()
custom_install_dir = False
if outdirs[0] is not None and outdirs[0] is not True:
# Either the value is set, or is set to False which means
# we want this specific output out of many outputs to not
# be installed.
custom_install_dir = True
elif isinstance(t, build.SharedModule):
outdirs[0] = self.environment.get_shared_module_dir()
elif isinstance(t, build.SharedLibrary):
outdirs[0] = self.environment.get_shared_lib_dir()
elif isinstance(t, build.StaticLibrary):
outdirs[0] = self.environment.get_static_lib_dir()
elif isinstance(t, build.Executable):
outdirs[0] = self.environment.get_bindir()
else:
assert(isinstance(t, build.BuildTarget))
# XXX: Add BuildTarget-specific install dir cases here
outdirs[0] = self.environment.get_libdir()
# Sanity-check the outputs and install_dirs
num_outdirs, num_out = len(outdirs), len(t.get_outputs())
if num_outdirs != 1 and num_outdirs != num_out:
m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
"Pass 'false' for outputs that should not be installed and 'true' for\n" \
'using the default installation directory for an output.'
raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
# Install the target output(s)
if isinstance(t, build.BuildTarget):
should_strip = self.get_option_for_target('strip', t)
# Install primary build output (library/executable/jar, etc)
# Done separately because of strip/aliases/rpath
if outdirs[0] is not False:
i = [self.get_target_filename(t), outdirs[0],
t.get_aliases(), should_strip, t.install_rpath]
d.targets.append(i)
# On toolchains/platforms that use an import library for
# linking (separate from the shared library with all the
# code), we need to install that too (dll.a/.lib).
if isinstance(t, build.SharedLibrary) and t.get_import_filename():
if custom_install_dir:
# If the DLL is installed into a custom directory,
# install the import library into the same place so
# it doesn't go into a surprising place
implib_install_dir = outdirs[0]
else:
implib_install_dir = self.environment.get_import_lib_dir()
# Install the import library.
i = [self.get_target_filename_for_linking(t),
implib_install_dir,
# It has no aliases, should not be stripped, and
# doesn't have an install_rpath
{}, False, '']
d.targets.append(i)
# Install secondary outputs. Only used for Vala right now.
if num_outdirs > 1:
for output, outdir in zip(t.get_outputs()[1:], outdirs[1:]):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdir, {}, False, None])
elif isinstance(t, build.CustomTarget):
# If only one install_dir is specified, assume that all
# outputs will be installed into it. This is for
# backwards-compatibility and because it makes sense to
# avoid repetition since this is a common use-case.
#
# To selectively install only some outputs, pass `false` as
# the install_dir for the corresponding output by index
if num_outdirs == 1 and num_out > 1:
for output in t.get_outputs():
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdirs[0], {}, False, None])
else:
for output, outdir in zip(t.get_outputs(), outdirs):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
d.targets.append([f, outdir, {}, False, None])
def generate_custom_install_script(self, d):
result = []
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for i in self.build.install_scripts:
exe = i['exe']
args = i['args']
fixed_args = []
for a in args:
a = a.replace('@SOURCE_ROOT@', srcdir)
a = a.replace('@BUILD_ROOT@', builddir)
fixed_args.append(a)
result.append(build.RunScript(exe, fixed_args))
d.install_scripts = result
def generate_header_install(self, d):
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
if not isinstance(f, File):
msg = 'Invalid header type {!r} can\'t be installed'
raise MesonException(msg.format(f))
abspath = f.absolute_path(srcdir, builddir)
i = [abspath, outdir]
d.headers.append(i)
def generate_man_install(self, d):
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
subdir = os.path.join(manroot, 'man' + num)
srcabs = os.path.join(self.environment.get_source_dir(), m.get_source_subdir(), f)
dstabs = os.path.join(subdir, os.path.split(f)[1] + '.gz')
i = [srcabs, dstabs]
d.man.append(i)
def generate_data_install(self, d):
data = self.build.get_data()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for de in data:
assert(isinstance(de, build.Data))
subdir = de.install_dir
for f in de.sources:
assert(isinstance(f, mesonlib.File))
plain_f = os.path.split(f.fname)[1]
dstabs = os.path.join(subdir, plain_f)
i = [f.absolute_path(srcdir, builddir), dstabs, de.install_mode]
d.data.append(i)
def generate_subdir_install(self, d):
for sd in self.build.get_install_subdirs():
inst_subdir = sd.installable_subdir.rstrip('/')
idir_parts = inst_subdir.split('/')
if len(idir_parts) > 1:
subdir = os.path.join(sd.source_subdir, '/'.join(idir_parts[:-1]))
inst_dir = idir_parts[-1]
else:
subdir = sd.source_subdir
inst_dir = sd.installable_subdir
src_dir = os.path.join(self.environment.get_source_dir(), subdir)
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, inst_dir, dst_dir, sd.install_mode])
def generate_tests(self, outfile):
self.serialize_tests()
test_exe = get_meson_script(self.environment, 'mesontest')
cmd = [sys.executable, '-u', test_exe, '--no-rebuild']
if not self.environment.coredata.get_builtin_option('stdsplit'):
cmd += ['--no-stdsplit']
if self.environment.coredata.get_builtin_option('errorlogs'):
cmd += ['--print-errorlogs']
elem = NinjaBuildElement(self.all_outputs, 'test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
# And then benchmarks.
cmd = [sys.executable, '-u', test_exe, '--benchmark', '--logbase',
'benchmarklog', '--num-processes=1', '--no-rebuild']
elem = NinjaBuildElement(self.all_outputs, 'benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
elem.add_item('pool', 'console')
elem.write(outfile)
def generate_rules(self, outfile):
outfile.write('# Rules for compiling.\n\n')
self.generate_compile_rules(outfile)
outfile.write('# Rules for linking.\n\n')
if self.environment.is_cross_build():
self.generate_static_link_rules(True, outfile)
self.generate_static_link_rules(False, outfile)
self.generate_dynamic_link_rules(outfile)
outfile.write('# Other rules\n\n')
outfile.write('rule CUSTOM_COMMAND\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' restat = 1\n\n')
# Ninja errors out if you have deps = gcc but no depfile, so we must
# have two rules for custom commands.
outfile.write('rule CUSTOM_COMMAND_DEP\n')
outfile.write(' command = $COMMAND\n')
outfile.write(' description = $DESC\n')
outfile.write(' deps = gcc\n')
outfile.write(' depfile = $DEPFILE\n')
outfile.write(' restat = 1\n\n')
outfile.write('rule REGENERATE_BUILD\n')
c = (quote_char + ninja_quote(sys.executable) + quote_char,
quote_char + ninja_quote(self.environment.get_build_command()) + quote_char,
'--internal',
'regenerate',
quote_char + ninja_quote(self.environment.get_source_dir()) + quote_char,
quote_char + ninja_quote(self.environment.get_build_dir()) + quote_char)
outfile.write(" command = %s %s %s %s %s %s --backend ninja\n" % c)
outfile.write(' description = Regenerating build files.\n')
outfile.write(' generator = 1\n\n')
outfile.write('\n')
def generate_phony(self, outfile):
outfile.write('# Phony build target, always out of date\n')
outfile.write('build PHONY: phony\n')
outfile.write('\n')
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []
compiler = target.compilers['java']
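        # Single-letter flags for the 'jar' tool assembled below:
        # 'c' = create archive, 'f' = archive file name follows,
        # 'e' = entry point (added only when a main class is set),
        # 'm' would select a manifest but is unused here.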
c = 'c'
m = ''
e = ''
f = 'f'
main_class = target.get_main_class()
if main_class != '':
e = 'e'
for src in src_list:
plain_class_path = self.generate_single_java_compile(src, target, compiler, outfile)
class_list.append(plain_class_path)
class_dep_list = [os.path.join(self.get_target_private_dir(target), i) for i in class_list]
jar_rule = 'java_LINKER'
commands = [c + m + e + f]
if e != '':
commands.append(main_class)
commands.append(self.get_target_filename(target))
# Java compilation can produce an arbitrary number of output
# class files for a single source file. Thus tell jar to just
# grab everything in the final package.
commands += ['-C', self.get_target_private_dir(target), '.']
elem = NinjaBuildElement(self.all_outputs, outname_rel, jar_rule, [])
elem.add_dep(class_dep_list)
elem.add_item('ARGS', commands)
elem.write(outfile)
def generate_cs_resource_tasks(self, target, outfile):
args = []
deps = []
for r in target.resources:
rel_sourcefile = os.path.join(self.build_to_src, target.subdir, r)
if r.endswith('.resources'):
a = '-resource:' + rel_sourcefile
elif r.endswith('.txt') or r.endswith('.resx'):
ofilebase = os.path.splitext(os.path.basename(r))[0] + '.resources'
ofilename = os.path.join(self.get_target_private_dir(target), ofilebase)
elem = NinjaBuildElement(self.all_outputs, ofilename, "CUSTOM_COMMAND", rel_sourcefile)
elem.add_item('COMMAND', ['resgen', rel_sourcefile, ofilename])
elem.add_item('DESC', 'Compiling resource %s.' % rel_sourcefile)
elem.write(outfile)
deps.append(ofilename)
a = '-resource:' + ofilename
else:
raise InvalidArguments('Unknown resource file %s.' % r)
args.append(a)
return args, deps
def generate_cs_target(self, target, outfile):
buildtype = self.get_option_for_target('buildtype', target)
fname = target.get_filename()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
compiler = target.compilers['cs']
rel_srcs = [s.rel_to_builddir(self.build_to_src) for s in src_list]
deps = []
commands = target.extra_args.get('cs', [])
commands += compiler.get_buildtype_args(buildtype)
if isinstance(target, build.Executable):
commands.append('-target:exe')
elif isinstance(target, build.SharedLibrary):
commands.append('-target:library')
else:
raise MesonException('Unknown C# target type.')
(resource_args, resource_deps) = self.generate_cs_resource_tasks(target, outfile)
commands += resource_args
deps += resource_deps
commands += compiler.get_output_args(outname_rel)
for l in target.link_targets:
lname = os.path.join(self.get_target_dir(l), l.get_filename())
commands += compiler.get_link_args(lname)
deps.append(lname)
if '-g' in commands:
outputs = [outname_rel, outname_rel + '.mdb']
else:
outputs = [outname_rel]
elem = NinjaBuildElement(self.all_outputs, outputs, 'cs_COMPILER', rel_srcs)
elem.add_dep(deps)
elem.add_item('ARGS', commands)
elem.write(outfile)
def generate_single_java_compile(self, src, target, compiler, outfile):
args = []
args += compiler.get_buildtype_args(self.get_option_for_target('buildtype', target))
args += self.build.get_global_args(compiler)
args += self.build.get_project_args(compiler, target.subproject)
args += target.get_java_args()
args += compiler.get_output_args(self.get_target_private_dir(target))
for i in target.include_dirs:
for idir in i.get_incdirs():
args += ['-sourcepath', os.path.join(self.build_to_src, i.curdir, idir)]
rel_src = src.rel_to_builddir(self.build_to_src)
plain_class_path = src.fname[:-4] + 'class'
rel_obj = os.path.join(self.get_target_private_dir(target), plain_class_path)
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler.get_language() + '_COMPILER', rel_src)
element.add_item('ARGS', args)
element.write(outfile)
return plain_class_path
def generate_java_link(self, outfile):
rule = 'rule java_LINKER\n'
command = ' command = jar $ARGS\n'
description = ' description = Creating JAR $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def determine_dep_vapis(self, target):
"""
Peek into the sources of BuildTargets we're linking with, and if any of
them was built with Vala, assume that it also generated a .vapi file of
the same name as the BuildTarget and return the path to it relative to
the build directory.
"""
result = OrderedSet()
for dep in target.link_targets:
for i in dep.sources:
if hasattr(i, 'fname'):
i = i.fname
if i.endswith('vala'):
vapiname = dep.name + '.vapi'
fullname = os.path.join(self.get_target_dir(dep), vapiname)
result.add(fullname)
break
return list(result)
def split_vala_sources(self, t):
"""
Splits the target's sources into .vala, .vapi, and other sources.
Handles both pre-existing and generated sources.
Returns a tuple (vala, vapi, others) each of which is a dictionary with
the keys being the path to the file (relative to the build directory)
and the value being the object that generated or represents the file.
"""
vala = OrderedDict()
vapi = OrderedDict()
others = OrderedDict()
othersgen = OrderedDict()
# Split pre-existing sources
for s in t.get_sources():
# BuildTarget sources are always mesonlib.File files which are
# either in the source root, or generated with configure_file and
# in the build root
if not isinstance(s, File):
msg = 'All sources in target {!r} must be of type ' \
'mesonlib.File, not {!r}'.format(t, s)
raise InvalidArguments(msg)
f = s.rel_to_builddir(self.build_to_src)
if s.endswith('.vala'):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
else:
srctype = others
srctype[f] = s
# Split generated sources
for gensrc in t.get_generated_sources():
for s in gensrc.get_outputs():
f = self.get_target_generated_dir(t, gensrc, s)
if s.endswith('.vala'):
srctype = vala
elif s.endswith('.vapi'):
srctype = vapi
# Generated non-Vala (C/C++) sources. Won't be used for
# generating the Vala compile rule below.
else:
srctype = othersgen
# Duplicate outputs are disastrous
if f in srctype and srctype[f] is not gensrc:
msg = 'Duplicate output {0!r} from {1!r} {2!r}; ' \
'conflicts with {0!r} from {4!r} {3!r}' \
''.format(f, type(gensrc).__name__, gensrc.name,
srctype[f].name, type(srctype[f]).__name__)
raise InvalidArguments(msg)
# Store 'somefile.vala': GeneratedList (or CustomTarget)
srctype[f] = gensrc
return vala, vapi, (others, othersgen)
def generate_vala_compile(self, target, outfile):
"""Vala is compiled into C. Set up all necessary build steps here."""
(vala_src, vapi_src, other_src) = self.split_vala_sources(target)
extra_dep_files = []
if not vala_src:
msg = 'Vala library {!r} has no Vala source files.'
raise InvalidArguments(msg.format(target.name))
valac = target.compilers['vala']
c_out_dir = self.get_target_private_dir(target)
# C files generated by valac
vala_c_src = []
# Files generated by valac
valac_outputs = []
# All sources that are passed to valac on the commandline
all_files = list(vapi_src.keys())
for (vala_file, gensrc) in vala_src.items():
all_files.append(vala_file)
# Figure out where the Vala compiler will write the compiled C file
# If the Vala file is in a subdir of the build dir (in our case
# because it was generated/built by something else), the subdir path
# components will be preserved in the output path. But if the Vala
# file is outside the build directory, the path components will be
# stripped and just the basename will be used.
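            # e.g. a built 'sub/foo.vala' maps to '<c_out_dir>/sub/foo.c', while
            # a source-tree '../src/foo.vala' maps to '<c_out_dir>/foo.c'
            # (illustrative paths).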
if isinstance(gensrc, (build.CustomTarget, build.GeneratedList)) or gensrc.is_built:
vala_c_file = os.path.splitext(vala_file)[0] + '.c'
else:
vala_c_file = os.path.splitext(os.path.basename(vala_file))[0] + '.c'
# All this will be placed inside the c_out_dir
vala_c_file = os.path.join(c_out_dir, vala_c_file)
vala_c_src.append(vala_c_file)
valac_outputs.append(vala_c_file)
args = self.generate_basic_compiler_args(target, valac)
# Tell Valac to output everything in our private directory. Sadly this
# means it will also preserve the directory components of Vala sources
# found inside the build tree (generated sources).
args += ['-d', c_out_dir]
if not isinstance(target, build.Executable):
# Library name
args += ['--library=' + target.name]
            # Generated header
hname = os.path.join(self.get_target_dir(target), target.vala_header)
args += ['-H', hname, '--use-header']
valac_outputs.append(hname)
            # Generated vapi file
vapiname = os.path.join(self.get_target_dir(target), target.vala_vapi)
# Force valac to write the vapi and gir files in the target build dir.
# Without this, it will write it inside c_out_dir
args += ['--vapi', os.path.join('..', target.vala_vapi)]
valac_outputs.append(vapiname)
target.outputs += [target.vala_header, target.vala_vapi]
# Install header and vapi to default locations if user requests this
if len(target.install_dir) > 1 and target.install_dir[1] is True:
target.install_dir[1] = self.environment.get_includedir()
if len(target.install_dir) > 2 and target.install_dir[2] is True:
target.install_dir[2] = os.path.join(self.environment.get_datadir(), 'vala', 'vapi')
# Generate GIR if requested
if isinstance(target.vala_gir, str):
girname = os.path.join(self.get_target_dir(target), target.vala_gir)
args += ['--gir', os.path.join('..', target.vala_gir)]
valac_outputs.append(girname)
target.outputs.append(target.vala_gir)
# Install GIR to default location if requested by user
if len(target.install_dir) > 3 and target.install_dir[3] is True:
target.install_dir[3] = os.path.join(self.environment.get_datadir(), 'gir-1.0')
# Detect gresources and add --gresources arguments for each
for (gres, gensrc) in other_src[1].items():
if isinstance(gensrc, modules.GResourceTarget):
gres_xml, = self.get_custom_target_sources(gensrc)
args += ['--gresources=' + gres_xml]
extra_args = []
for a in target.extra_args.get('vala', []):
if isinstance(a, File):
relname = a.rel_to_builddir(self.build_to_src)
extra_dep_files.append(relname)
extra_args.append(relname)
else:
extra_args.append(a)
dependency_vapis = self.determine_dep_vapis(target)
extra_dep_files += dependency_vapis
args += extra_args
element = NinjaBuildElement(self.all_outputs, valac_outputs,
valac.get_language() + '_COMPILER',
all_files + dependency_vapis)
element.add_item('ARGS', args)
element.add_dep(extra_dep_files)
element.write(outfile)
return other_src[0], other_src[1], vala_c_src
def generate_rust_target(self, target, outfile):
rustc = target.compilers['rust']
relsrc = []
for i in target.get_sources():
if not rustc.can_compile(i):
raise InvalidArguments('Rust target %s contains a non-rust source file.' % target.get_basename())
relsrc.append(i.rel_to_builddir(self.build_to_src))
target_name = os.path.join(target.subdir, target.get_filename())
args = ['--crate-type']
if isinstance(target, build.Executable):
cratetype = 'bin'
elif isinstance(target, build.SharedLibrary):
cratetype = 'rlib'
elif isinstance(target, build.StaticLibrary):
cratetype = 'rlib'
else:
raise InvalidArguments('Unknown target type for rustc.')
args.append(cratetype)
args += rustc.get_buildtype_args(self.get_option_for_target('buildtype', target))
depfile = os.path.join(target.subdir, target.name + '.d')
args += ['--emit', 'dep-info={}'.format(depfile), '--emit', 'link']
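        # 'dep-info' makes rustc write a Makefile-style depfile; the
        # rust_COMPILER rule picks it up through the 'targetdep' item added
        # below, so ninja can track per-crate source dependencies.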
args += target.get_extra_args('rust')
args += ['-o', os.path.join(target.subdir, target.get_filename())]
orderdeps = [os.path.join(t.subdir, t.get_filename()) for t in target.link_targets]
linkdirs = OrderedDict()
for d in target.link_targets:
linkdirs[d.subdir] = True
for d in linkdirs.keys():
if d == '':
d = '.'
args += ['-L', d]
element = NinjaBuildElement(self.all_outputs, target_name, 'rust_COMPILER', relsrc)
if len(orderdeps) > 0:
element.add_orderdep(orderdeps)
element.add_item('ARGS', args)
element.add_item('targetdep', depfile)
element.add_item('cratetype', cratetype)
element.write(outfile)
def swift_module_file_name(self, target):
return os.path.join(self.get_target_private_dir(target),
self.target_swift_modulename(target) + '.swiftmodule')
def target_swift_modulename(self, target):
return target.name
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_modules(self, target):
result = []
for l in target.link_targets:
if self.is_swift_target(l):
result.append(self.swift_module_file_name(l))
return result
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_swift_link_deps(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_filename(l))
return result
def split_swift_generated_sources(self, target):
all_srcs = self.get_target_generated_sources(target)
srcs = []
others = []
for i in all_srcs:
if i.endswith('.swift'):
srcs.append(i)
else:
others.append(i)
return srcs, others
def generate_swift_target(self, target, outfile):
module_name = self.target_swift_modulename(target)
swiftc = target.compilers['swift']
abssrc = []
abs_headers = []
header_imports = []
for i in target.get_sources():
if swiftc.can_compile(i):
relsrc = i.rel_to_builddir(self.build_to_src)
abss = os.path.normpath(os.path.join(self.environment.get_build_dir(), relsrc))
abssrc.append(abss)
elif self.environment.is_header(i):
relh = i.rel_to_builddir(self.build_to_src)
absh = os.path.normpath(os.path.join(self.environment.get_build_dir(), relh))
abs_headers.append(absh)
header_imports += swiftc.get_header_import_args(absh)
else:
raise InvalidArguments('Swift target %s contains a non-swift source file.' % target.get_basename())
os.makedirs(self.get_target_private_dir_abs(target), exist_ok=True)
compile_args = swiftc.get_compile_only_args()
compile_args += swiftc.get_module_args(module_name)
link_args = swiftc.get_output_args(os.path.join(self.environment.get_build_dir(), self.get_target_filename(target)))
rundir = self.get_target_private_dir(target)
out_module_name = self.swift_module_file_name(target)
in_module_files = self.determine_swift_dep_modules(target)
abs_module_dirs = self.determine_swift_dep_dirs(target)
module_includes = []
for x in abs_module_dirs:
module_includes += swiftc.get_include_args(x)
link_deps = self.get_swift_link_deps(target)
abs_link_deps = [os.path.join(self.environment.get_build_dir(), x) for x in link_deps]
(rel_generated, _) = self.split_swift_generated_sources(target)
abs_generated = [os.path.join(self.environment.get_build_dir(), x) for x in rel_generated]
# We need absolute paths because swiftc needs to be invoked in a subdir
        # and this is the easiest way to go about it.
objects = [] # Relative to swift invocation dir
rel_objects = [] # Relative to build.ninja
for i in abssrc + abs_generated:
base = os.path.split(i)[1]
oname = os.path.splitext(base)[0] + '.o'
objects.append(oname)
rel_objects.append(os.path.join(self.get_target_private_dir(target), oname))
# Swiftc does not seem to be able to emit objects and module files in one go.
elem = NinjaBuildElement(self.all_outputs, rel_objects,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_dep(abs_headers)
elem.add_item('ARGS', compile_args + header_imports + abs_generated + module_includes)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, out_module_name,
'swift_COMPILER',
abssrc)
elem.add_dep(in_module_files + rel_generated)
elem.add_item('ARGS', compile_args + abs_generated + module_includes + swiftc.get_mod_gen_args())
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
if isinstance(target, build.StaticLibrary):
elem = self.generate_link(target, outfile, self.get_target_filename(target),
rel_objects, self.build.static_linker)
elem.write(outfile)
elif isinstance(target, build.Executable):
elem = NinjaBuildElement(self.all_outputs, self.get_target_filename(target), 'swift_COMPILER', [])
elem.add_dep(rel_objects)
elem.add_dep(link_deps)
elem.add_item('ARGS', link_args + swiftc.get_std_exe_link_args() + objects + abs_link_deps)
elem.add_item('RUNDIR', rundir)
elem.write(outfile)
else:
raise MesonException('Swift supports only executable and static library targets.')
def generate_static_link_rules(self, is_cross, outfile):
if 'java' in self.build.compilers:
if not is_cross:
self.generate_java_link(outfile)
if is_cross:
if self.environment.cross_info.need_cross_compiler():
static_linker = self.build.static_cross_linker
else:
static_linker = self.build.static_linker
crstr = '_CROSS'
else:
static_linker = self.build.static_linker
crstr = ''
if static_linker is None:
return
rule = 'rule STATIC%s_LINKER\n' % crstr
# We don't use @file.rsp on Windows with ArLinker because llvm-ar and
# gcc-ar blindly pass the --plugin argument to `ar` and you cannot pass
# options as arguments while using the @file.rsp syntax.
# See: https://github.com/mesonbuild/meson/issues/1646
if mesonlib.is_windows() and not isinstance(static_linker, compilers.ArLinker):
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = $LINK_ARGS {output_args} $in
'''
else:
command_template = ' command = {executable} $LINK_ARGS {output_args} $in\n'
cmdlist = []
# FIXME: Must normalize file names with pathlib.Path before writing
# them out to fix this properly on Windows. See:
# https://github.com/mesonbuild/meson/issues/1517
# https://github.com/mesonbuild/meson/issues/1526
if isinstance(static_linker, compilers.ArLinker) and not mesonlib.is_windows():
# `ar` has no options to overwrite archives. It always appends,
# which is never what we want. Delete an existing library first if
# it exists. https://github.com/mesonbuild/meson/issues/1355
cmdlist = [execute_wrapper, rmfile_prefix.format('$out')]
cmdlist += static_linker.get_exelist()
command = command_template.format(
executable=' '.join(cmdlist),
output_args=' '.join(static_linker.get_output_args('$out')))
description = ' description = Linking static target $out.\n\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
def generate_dynamic_link_rules(self, outfile):
ctypes = [(self.build.compilers, False)]
if self.environment.is_cross_build():
if self.environment.cross_info.need_cross_compiler():
ctypes.append((self.build.cross_compilers, True))
else:
# Native compiler masquerades as the cross compiler.
ctypes.append((self.build.compilers, True))
else:
ctypes.append((self.build.cross_compilers, True))
for (complist, is_cross) in ctypes:
for langname, compiler in complist.items():
if langname == 'java' \
or langname == 'vala' \
or langname == 'rust' \
or langname == 'cs':
continue
crstr = ''
cross_args = []
if is_cross:
crstr = '_CROSS'
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_link_args']
except KeyError:
pass
rule = 'rule %s%s_LINKER\n' % (langname, crstr)
if mesonlib.is_windows():
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing
'''
else:
command_template = ' command = {executable} $ARGS {output_args} $in $LINK_ARGS {cross_args} $aliasing\n'
command = command_template.format(
executable=' '.join(compiler.get_linker_exelist()),
cross_args=' '.join(cross_args),
output_args=' '.join(compiler.get_linker_output_args('$out'))
)
description = ' description = Linking target $out.'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
outfile.write('\n')
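        # The SHSYM rule runs the internal 'symbolextractor' helper to record a
        # shared library's exported symbols; together with 'restat = 1' this
        # lets ninja skip relinking dependents when the symbol list is
        # unchanged.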
symrule = 'rule SHSYM\n'
symcmd = ' command = "%s" "%s" %s %s %s %s $CROSS\n' % (ninja_quote(sys.executable),
self.environment.get_build_command(),
'--internal',
'symbolextractor',
'$in',
'$out')
synstat = ' restat = 1\n'
syndesc = ' description = Generating symbol file $out.\n'
outfile.write(symrule)
outfile.write(symcmd)
outfile.write(synstat)
outfile.write(syndesc)
outfile.write('\n')
def generate_java_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Java object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_cs_compile_rule(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling C Sharp target $out.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_vala_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Vala source $in.\n'
restat = ' restat = 1\n' # ValaC does this always to take advantage of it.
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(restat)
outfile.write('\n')
def generate_rust_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
invoc = ' '.join([ninja_quote(i) for i in compiler.get_exelist()])
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Rust source $in.\n'
depfile = ' depfile = $targetdep\n'
depstyle = ' deps = gcc\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write(depfile)
outfile.write(depstyle)
outfile.write('\n')
def generate_swift_compile_rules(self, compiler, outfile):
rule = 'rule %s_COMPILER\n' % compiler.get_language()
full_exe = [ninja_quote(sys.executable),
ninja_quote(self.environment.get_build_command()),
'--internal',
'dirchanger',
'$RUNDIR']
invoc = (' '.join(full_exe) + ' ' +
' '.join(ninja_quote(i) for i in compiler.get_exelist()))
command = ' command = %s $ARGS $in\n' % invoc
description = ' description = Compiling Swift source $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
def generate_fortran_dep_hack(self, outfile):
if mesonlib.is_windows():
cmd = 'cmd /C ""'
else:
cmd = 'true'
template = '''# Workaround for these issues:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47485
rule FORTRAN_DEP_HACK
command = %s
description = Dep hack
restat = 1
'''
outfile.write(template % cmd)
def generate_llvm_ir_compile_rule(self, compiler, is_cross, outfile):
if getattr(self, 'created_llvm_ir_rule', False):
return
rule = 'rule llvm_ir{}_COMPILER\n'.format('_CROSS' if is_cross else '')
if mesonlib.is_windows():
command_template = ' command = {executable} @$out.rsp\n' \
' rspfile = $out.rsp\n' \
' rspfile_content = {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
else:
command_template = ' command = {executable} {cross_args} $ARGS {output_args} {compile_only_args} $in\n'
command = command_template.format(
executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
cross_args=' '.join(self.get_cross_info_lang_args(compiler.language, is_cross)),
output_args=' '.join(compiler.get_output_args('$out')),
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Compiling LLVM IR object $in.\n'
outfile.write(rule)
outfile.write(command)
outfile.write(description)
outfile.write('\n')
self.created_llvm_ir_rule = True
def get_cross_info_lang_args(self, lang, is_cross):
if is_cross:
try:
return self.environment.cross_info.config['properties'][lang + '_args']
except KeyError:
pass
return []
def generate_compile_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname == 'java':
if not is_cross:
self.generate_java_compile_rule(compiler, outfile)
return
if langname == 'cs':
if not is_cross:
self.generate_cs_compile_rule(compiler, outfile)
return
if langname == 'vala':
if not is_cross:
self.generate_vala_compile_rules(compiler, outfile)
return
if langname == 'rust':
if not is_cross:
self.generate_rust_compile_rules(compiler, outfile)
return
if langname == 'swift':
if not is_cross:
self.generate_swift_compile_rules(compiler, outfile)
return
if langname == 'fortran':
self.generate_fortran_dep_hack(outfile)
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_COMPILER\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
cross_args = self.get_cross_info_lang_args(langname, is_cross)
if mesonlib.is_windows():
command_template = ''' command = {executable} @$out.rsp
rspfile = $out.rsp
rspfile_content = {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in
'''
else:
command_template = ' command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n'
command = command_template.format(
executable=' '.join([ninja_quote(i) for i in compiler.get_exelist()]),
cross_args=' '.join(cross_args),
dep_args=' '.join(quoted_depargs),
output_args=' '.join(compiler.get_output_args('$out')),
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Compiling %s object $out.\n' % langname.title()
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_pch_rule_for(self, langname, compiler, qstr, is_cross, outfile):
if langname != 'c' and langname != 'cpp':
return
if is_cross:
crstr = '_CROSS'
else:
crstr = ''
rule = 'rule %s%s_PCH\n' % (langname, crstr)
depargs = compiler.get_dependency_gen_args('$out', '$DEPFILE')
cross_args = []
if is_cross:
try:
cross_args = self.environment.cross_info.config['properties'][langname + '_args']
except KeyError:
pass
quoted_depargs = []
for d in depargs:
if d != '$out' and d != '$in':
d = qstr % d
quoted_depargs.append(d)
if compiler.get_id() == 'msvc':
output = ''
else:
output = ' '.join(compiler.get_output_args('$out'))
command = " command = {executable} {cross_args} $ARGS {dep_args} {output_args} {compile_only_args} $in\n".format(
executable=' '.join(compiler.get_exelist()),
cross_args=' '.join(cross_args),
dep_args=' '.join(quoted_depargs),
output_args=output,
compile_only_args=' '.join(compiler.get_compile_only_args())
)
description = ' description = Precompiling header %s.\n' % '$in'
if compiler.get_id() == 'msvc':
deps = ' deps = msvc\n'
else:
deps = ' deps = gcc\n'
deps += ' depfile = $DEPFILE\n'
outfile.write(rule)
outfile.write(command)
outfile.write(deps)
outfile.write(description)
outfile.write('\n')
def generate_compile_rules(self, outfile):
qstr = quote_char + "%s" + quote_char
for langname, compiler in self.build.compilers.items():
if compiler.get_id() == 'clang':
self.generate_llvm_ir_compile_rule(compiler, False, outfile)
self.generate_compile_rule_for(langname, compiler, qstr, False, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, False, outfile)
if self.environment.is_cross_build():
            # In case we are doing a target-only build, make the native compilers
# masquerade as cross compilers.
if self.environment.cross_info.need_cross_compiler():
cclist = self.build.cross_compilers
else:
cclist = self.build.compilers
for langname, compiler in cclist.items():
if compiler.get_id() == 'clang':
self.generate_llvm_ir_compile_rule(compiler, True, outfile)
self.generate_compile_rule_for(langname, compiler, qstr, True, outfile)
self.generate_pch_rule_for(langname, compiler, qstr, True, outfile)
outfile.write('\n')
def generate_generator_list_rules(self, target, outfile):
# CustomTargets have already written their rules,
# so write rules for GeneratedLists here
for genlist in target.get_generated_sources():
if isinstance(genlist, build.CustomTarget):
continue
self.generate_genlist_for_target(genlist, target, outfile)
def generate_genlist_for_target(self, genlist, target, outfile):
generator = genlist.get_generator()
exe = generator.get_exe()
exe_arr = self.exe_object_to_cmd_array(exe)
infilelist = genlist.get_inputs()
outfilelist = genlist.get_outputs()
base_args = generator.get_arglist()
extra_dependencies = [os.path.join(self.build_to_src, i) for i in genlist.extra_depends]
source_target_dir = self.get_target_source_dir(target)
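        # Each input file gets its own build statement; the placeholders
        # @INPUT@, @OUTPUT@, @DEPFILE@, @SOURCE_DIR@, @BUILD_DIR@,
        # @CURRENT_SOURCE_DIR@, @SOURCE_ROOT@ and @BUILD_ROOT@ in the generator
        # arguments are substituted in the loop below.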
for i in range(len(infilelist)):
if len(generator.outputs) == 1:
sole_output = os.path.join(self.get_target_private_dir(target), outfilelist[i])
else:
sole_output = ''
curfile = infilelist[i]
infilename = curfile.rel_to_builddir(self.build_to_src)
outfiles = genlist.get_outputs_for(curfile)
outfiles = [os.path.join(self.get_target_private_dir(target), of) for of in outfiles]
if generator.depfile is None:
rulename = 'CUSTOM_COMMAND'
args = base_args
else:
rulename = 'CUSTOM_COMMAND_DEP'
depfilename = generator.get_dep_outname(infilename)
depfile = os.path.join(self.get_target_private_dir(target), depfilename)
args = [x.replace('@DEPFILE@', depfile) for x in base_args]
args = [x.replace("@INPUT@", infilename).replace('@OUTPUT@', sole_output)
for x in args]
args = self.replace_outputs(args, self.get_target_private_dir(target), outfilelist)
# We have consumed output files, so drop them from the list of remaining outputs.
if sole_output == '':
outfilelist = outfilelist[len(generator.outputs):]
relout = self.get_target_private_dir(target)
args = [x.replace("@SOURCE_DIR@", self.build_to_src).replace("@BUILD_DIR@", relout)
for x in args]
args = [x.replace("@CURRENT_SOURCE_DIR@", source_target_dir) for x in args]
args = [x.replace("@SOURCE_ROOT@", self.build_to_src).replace("@BUILD_ROOT@", '.')
for x in args]
cmdlist = exe_arr + self.replace_extra_args(args, genlist)
elem = NinjaBuildElement(self.all_outputs, outfiles, rulename, infilename)
if generator.depfile is not None:
elem.add_item('DEPFILE', depfile)
if len(extra_dependencies) > 0:
elem.add_dep(extra_dependencies)
elem.add_item('DESC', 'Generating $out')
if isinstance(exe, build.BuildTarget):
elem.add_dep(self.get_target_filename(exe))
elem.add_item('COMMAND', cmdlist)
elem.write(outfile)
def scan_fortran_module_outputs(self, target):
compiler = None
for lang, c in self.build.compilers.items():
if lang == 'fortran':
compiler = c
break
if compiler is None:
self.fortran_deps[target.get_basename()] = {}
return
modre = re.compile(r"\s*module\s+(\w+)", re.IGNORECASE)
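        # e.g. matches '  module foo_mod' (case-insensitively) and captures 'foo_mod'.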
module_files = {}
for s in target.get_sources():
# FIXME, does not work for Fortran sources generated by
# custom_target() and generator() as those are run after
# the configuration (configure_file() is OK)
if not compiler.can_compile(s):
continue
filename = s.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
with open(filename) as f:
for line in f:
modmatch = modre.match(line)
if modmatch is not None:
modname = modmatch.group(1)
if modname.lower() == 'procedure':
# MODULE PROCEDURE construct
continue
if modname in module_files:
raise InvalidArguments(
'Namespace collision: module %s defined in '
'two files %s and %s.' %
(modname, module_files[modname], s))
module_files[modname] = s
self.fortran_deps[target.get_basename()] = module_files
def get_fortran_deps(self, compiler, src, target):
mod_files = []
usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
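        # e.g. matches '  use foo_mod' (case-insensitively) and captures 'foo_mod'.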
dirname = self.get_target_private_dir(target)
tdeps = self.fortran_deps[target.get_basename()]
with open(src) as f:
for line in f:
usematch = usere.match(line)
if usematch is not None:
usename = usematch.group(1)
if usename not in tdeps:
# The module is not provided by any source file. This
# is due to:
# a) missing file/typo/etc
# b) using a module provided by the compiler, such as
# OpenMP
# There's no easy way to tell which is which (that I
# know of) so just ignore this and go on. Ideally we
# would print a warning message to the user but this is
# a common occurrence, which would lead to lots of
# distracting noise.
continue
mod_source_file = tdeps[usename]
# Check if a source uses a module it exports itself.
# Potential bug if multiple targets have a file with
# the same name.
if mod_source_file.fname == os.path.split(src)[1]:
continue
mod_name = compiler.module_name_to_filename(
usematch.group(1))
mod_files.append(os.path.join(dirname, mod_name))
return mod_files
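    # Illustrative sketch (added note, not part of the original backend): how a
    # "use" statement found above turns into a module-file dependency. Names and
    # paths are hypothetical; the real filename comes from
    # compiler.module_name_to_filename().
    #
    #   usere = re.compile(r"\s*use\s+(\w+)", re.IGNORECASE)
    #   usere.match("  use mymod, only: foo").group(1)    # -> 'mymod'
    #   # a gfortran-style compiler would typically yield
    #   os.path.join('sub/target@priv', 'mymod.mod')      # appended to mod_files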
def get_cross_stdlib_args(self, target, compiler):
if not target.is_cross:
return []
if not self.environment.cross_info.has_stdlib(compiler.language):
return []
return compiler.get_no_stdinc_args()
def get_compile_debugfile_args(self, compiler, target, objfile):
if compiler.id != 'msvc':
return []
# The way MSVC uses PDB files is documented exactly nowhere so
# the following is what we have been able to decipher via
# reverse engineering.
#
# Each object file gets the path of its PDB file written
# inside it. This can be either the final PDB (for, say,
# foo.exe) or an object pdb (for foo.obj). If the former, then
# each compilation step locks the pdb file for writing, which
# is a bottleneck and object files from one target can not be
# used in a different target. The latter seems to be the
# sensible one (and what Unix does) but there is a catch. If
# you try to use precompiled headers MSVC will error out
# because both source and pch pdbs go in the same file and
# they must be the same.
#
# This means:
#
# - pch files must be compiled anew for every object file (negating
# the entire point of having them in the first place)
# - when using pch, output must go to the target pdb
#
# Since both of these are broken in some way, use the one that
# works for each target. This unfortunately means that you
# can't combine pch and object extraction in a single target.
#
# PDB files also lead to filename collisions. A target foo.exe
# has a corresponding foo.pdb. A shared library foo.dll _also_
# has pdb file called foo.pdb. So will a static library
# foo.lib, which clobbers both foo.pdb _and_ the dll file's
# export library called foo.lib (by default, currently we name
        # them libfoo.a to avoid this issue). You can give the files
# unique names such as foo_exe.pdb but VC also generates a
# bunch of other files which take their names from the target
# basename (i.e. "foo") and stomp on each other.
#
# CMake solves this problem by doing two things. First of all
# static libraries do not generate pdb files at
# all. Presumably you don't need them and VC is smart enough
# to look up the original data when linking (speculation, not
# tested). The second solution is that you can only have
# target named "foo" as an exe, shared lib _or_ static
# lib. This makes filename collisions not happen. The downside
# is that you can't have an executable foo that uses a shared
# library libfoo.so, which is a common idiom on Unix.
#
# If you feel that the above is completely wrong and all of
# this is actually doable, please send patches.
if target.has_pch():
tfilename = self.get_target_filename_abs(target)
return compiler.get_compile_debugfile_args(tfilename, pch=True)
else:
return compiler.get_compile_debugfile_args(objfile, pch=False)
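    # Illustrative example (added note; the exact strings come from
    # compiler.get_compile_debugfile_args() and are assumptions here). Per the
    # "/Fd /FS" note used later in this backend, the decision above amounts to:
    #
    #   no pch:   object pdb, e.g. ['/Fdsub/target@priv/foo.c.obj.pdb', '/FS']
    #   with pch: target pdb, e.g. ['/Fdsub/foo.exe.pdb', '/FS']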
def get_link_debugfile_args(self, linker, target, outname):
return linker.get_link_debugfile_args(outname)
def generate_llvm_ir_compile(self, target, outfile, src):
compiler = get_compiler_for_source(target.compilers.values(), src)
commands = CompilerArgs(compiler)
# Compiler args for compiling this target
commands += compilers.get_base_compile_args(self.environment.coredata.base_options,
compiler)
if isinstance(src, File):
if src.is_built:
src_filename = os.path.join(src.subdir, src.fname)
else:
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
if isinstance(src, File) and src.is_built:
rel_src = src.fname
elif isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments('Invalid source type: {!r}'.format(src))
# Write the Ninja build command
compiler_name = 'llvm_ir{}_COMPILER'.format('_CROSS' if target.is_cross else '')
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
element.add_item('ARGS', commands)
element.write(outfile)
return rel_obj
def get_source_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
tmppath = os.path.normpath(os.path.join(self.build_to_src, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target, compiler):
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
def get_custom_target_dir_include_args(self, target, compiler):
custom_target_include_dirs = []
for i in target.get_generated_sources():
# Generator output goes into the target private dir which is
# already in the include paths list. Only custom targets have their
# own target build dir.
if not isinstance(i, build.CustomTarget):
continue
idir = self.get_target_dir(i)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
incs = []
for i in custom_target_include_dirs:
incs += compiler.get_include_args(i, False)
return incs
def _generate_single_compile(self, target, compiler, is_generated=False):
base_proxy = backends.OptionOverrideProxy(target.option_overrides,
self.environment.coredata.base_options)
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
commands = CompilerArgs(compiler)
# Add compiler args for compiling this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
commands += compilers.get_base_compile_args(base_proxy,
compiler)
# The code generated by valac is usually crap and has tons of unused
# variables and such, so disable warnings for Vala C sources.
no_warn_args = (is_generated == 'vala')
# Add compiler args and include paths from several sources; defaults,
# build options, external dependencies, etc.
commands += self.generate_basic_compiler_args(target, compiler, no_warn_args)
# Add include dirs from the `include_directories:` kwarg on the target
# and from `include_directories:` of internal deps of the target.
#
# Target include dirs should override internal deps include dirs.
# This is handled in BuildTarget.process_kwargs()
#
# Include dirs from internal deps should override include dirs from
# external deps and must maintain the order in which they are specified.
# Hence, we must reverse the list so that the order is preserved.
for i in reversed(target.get_include_dirs()):
basedir = i.get_curdir()
for d in i.get_incdirs():
# Avoid superfluous '/.' at the end of paths when d is '.'
if d not in ('', '.'):
expdir = os.path.join(basedir, d)
else:
expdir = basedir
srctreedir = os.path.join(self.build_to_src, expdir)
# Add source subdir first so that the build subdir overrides it
sargs = compiler.get_include_args(srctreedir, i.is_system)
commands += sargs
# There may be include dirs where a build directory has not been
# created for some source dir. For example if someone does this:
#
# inc = include_directories('foo/bar/baz')
#
# But never subdir()s into the actual dir.
if os.path.isdir(os.path.join(self.environment.get_build_dir(), expdir)):
bargs = compiler.get_include_args(expdir, i.is_system)
else:
bargs = []
commands += bargs
for d in i.get_extra_build_dirs():
commands += compiler.get_include_args(d, i.is_system)
# Add per-target compile args, f.ex, `c_args : ['-DFOO']`. We set these
# near the end since these are supposed to override everything else.
commands += self.escape_extra_args(compiler,
target.get_extra_args(compiler.get_language()))
# Add source dir and build dir. Project-specific and target-specific
# include paths must override per-target compile args, include paths
# from external dependencies, internal dependencies, and from
# per-target `include_directories:`
#
# We prefer headers in the build dir and the custom target dir over the
# source dir since, for instance, the user might have an
# srcdir == builddir Autotools build in their source tree. Many
# projects that are moving to Meson have both Meson and Autotools in
# parallel as part of the transition.
commands += self.get_source_dir_include_args(target, compiler)
commands += self.get_custom_target_dir_include_args(target, compiler)
commands += self.get_build_dir_include_args(target, compiler)
# Finally add the private dir for the target to the include path. This
# must override everything else and must be the final path added.
commands += compiler.get_include_args(self.get_target_private_dir(target), False)
return commands
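    # Summary sketch (added note, not part of the original backend): the
    # override order assembled above, lowest priority first; later additions are
    # appended after earlier ones and therefore win.
    #
    #   1. base build options (b_* options, buildtype)
    #   2. basic compiler args and external/internal dependency flags
    #   3. include_directories: of internal deps, then of the target itself
    #   4. per-target language args, e.g. c_args : ['-DFOO']
    #   5. source dir / custom target dir / build dir include paths
    #   6. the target private dir include, always added last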
def generate_single_compile(self, target, outfile, src, is_generated=False, header_deps=[], order_deps=[]):
"""
Compiles C/C++, ObjC/ObjC++, Fortran, and D sources
"""
if isinstance(src, str) and src.endswith('.h'):
raise AssertionError('BUG: sources should not contain headers {!r}'.format(src))
compiler = get_compiler_for_source(target.compilers.values(), src)
key = (target, compiler, is_generated)
if key in self.target_arg_cache:
commands = self.target_arg_cache[key]
else:
commands = self._generate_single_compile(target, compiler, is_generated)
self.target_arg_cache[key] = commands
commands = CompilerArgs(commands.compiler, commands)
if isinstance(src, mesonlib.File) and src.is_built:
rel_src = os.path.join(src.subdir, src.fname)
if os.path.isabs(rel_src):
assert(rel_src.startswith(self.environment.get_build_dir()))
rel_src = rel_src[len(self.environment.get_build_dir())+1:]
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
elif isinstance(src, mesonlib.File):
rel_src = src.rel_to_builddir(self.build_to_src)
abs_src = src.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
elif is_generated:
raise AssertionError('BUG: broken generated source file handling for {!r}'.format(src))
else:
if isinstance(src, File):
rel_src = src.rel_to_builddir(self.build_to_src)
else:
raise InvalidArguments('Invalid source type: {!r}'.format(src))
abs_src = os.path.join(self.environment.get_build_dir(), rel_src)
if isinstance(src, File):
if src.is_built:
src_filename = os.path.join(src.subdir, src.fname)
if os.path.isabs(src_filename):
assert(src_filename.startswith(self.environment.get_build_dir()))
src_filename = src_filename[len(self.environment.get_build_dir())+1:]
else:
src_filename = src.fname
elif os.path.isabs(src):
src_filename = os.path.basename(src)
else:
src_filename = src
obj_basename = src_filename.replace('/', '_').replace('\\', '_')
rel_obj = os.path.join(self.get_target_private_dir(target), obj_basename)
rel_obj += '.' + self.environment.get_object_suffix()
dep_file = compiler.depfile_for_object(rel_obj)
# Add MSVC debug file generation compile flags: /Fd /FS
commands += self.get_compile_debugfile_args(compiler, target, rel_obj)
# PCH handling
if self.environment.coredata.base_options.get('b_pch', False):
commands += self.get_pch_include_args(compiler, target)
pchlist = target.get_pch(compiler.language)
else:
pchlist = []
if not pchlist:
pch_dep = []
elif compiler.id == 'intel':
pch_dep = []
else:
arr = []
i = os.path.join(self.get_target_private_dir(target), compiler.get_pch_name(pchlist[0]))
arr.append(i)
pch_dep = arr
crstr = ''
if target.is_cross:
crstr = '_CROSS'
compiler_name = '%s%s_COMPILER' % (compiler.get_language(), crstr)
extra_deps = []
if compiler.get_language() == 'fortran':
# Can't read source file to scan for deps if it's generated later
# at build-time. Skip scanning for deps, and just set the module
# outdir argument instead.
# https://github.com/mesonbuild/meson/issues/1348
if not is_generated:
extra_deps += self.get_fortran_deps(compiler, abs_src, target)
# Dependency hack. Remove once multiple outputs in Ninja is fixed:
# https://groups.google.com/forum/#!topic/ninja-build/j-2RfBIOd_8
for modname, srcfile in self.fortran_deps[target.get_basename()].items():
modfile = os.path.join(self.get_target_private_dir(target),
compiler.module_name_to_filename(modname))
if srcfile == src:
depelem = NinjaBuildElement(self.all_outputs, modfile, 'FORTRAN_DEP_HACK', rel_obj)
depelem.write(outfile)
commands += compiler.get_module_outdir_args(self.get_target_private_dir(target))
element = NinjaBuildElement(self.all_outputs, rel_obj, compiler_name, rel_src)
for d in header_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_dep(d)
for d in extra_deps:
element.add_dep(d)
for d in order_deps:
if isinstance(d, File):
d = d.rel_to_builddir(self.build_to_src)
elif not self.has_dir_part(d):
d = os.path.join(self.get_target_private_dir(target), d)
element.add_orderdep(d)
element.add_orderdep(pch_dep)
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
for i in self.get_fortran_orderdeps(target, compiler):
element.add_orderdep(i)
element.add_item('DEPFILE', dep_file)
element.add_item('ARGS', commands)
element.write(outfile)
return rel_obj
def has_dir_part(self, fname):
# FIXME FIXME: The usage of this is a terrible and unreliable hack
if isinstance(fname, File):
return fname.subdir != ''
return '/' in fname or '\\' in fname
# Fortran is a bit weird (again). When you link against a library, just compiling a source file
# requires the mod files that are output when single files are built. To do this right we would need to
    # scan all inputs and write out explicit deps for each file. That is too slow and too much effort so
    # instead just have an ordered dependency on the library. This ensures all required mod files are created.
# The real deps are then detected via dep file generation from the compiler. This breaks on compilers that
# produce incorrect dep files but such is life.
def get_fortran_orderdeps(self, target, compiler):
if compiler.language != 'fortran':
return []
return [os.path.join(self.get_target_dir(lt), lt.get_filename()) for lt in target.link_targets]
def generate_msvc_pch_command(self, target, compiler, pch):
if len(pch) != 2:
raise RuntimeError('MSVC requires one header and one source to produce precompiled headers.')
header = pch[0]
source = pch[1]
pchname = compiler.get_pch_name(header)
dst = os.path.join(self.get_target_private_dir(target), pchname)
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
just_name = os.path.split(header)[1]
(objname, pch_args) = compiler.gen_pch_args(just_name, source, dst)
commands += pch_args
commands += self.get_compile_debugfile_args(compiler, target, objname)
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [objname]
def generate_gcc_pch_command(self, target, compiler, pch):
commands = []
commands += self.generate_basic_compiler_args(target, compiler)
dst = os.path.join(self.get_target_private_dir(target),
os.path.split(pch)[-1] + '.' + compiler.get_pch_suffix())
dep = dst + '.' + compiler.get_depfile_suffix()
return commands, dep, dst, [] # Gcc does not create an object file during pch generation.
def generate_pch(self, target, outfile):
cstr = ''
pch_objects = []
if target.is_cross:
cstr = '_CROSS'
for lang in ['c', 'cpp']:
pch = target.get_pch(lang)
if not pch:
continue
if '/' not in pch[0] or '/' not in pch[-1]:
msg = 'Precompiled header of {!r} must not be in the same ' \
'directory as source, please put it in a subdirectory.' \
''.format(target.get_basename())
raise InvalidArguments(msg)
compiler = target.compilers[lang]
if compiler.id == 'msvc':
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[-1])
(commands, dep, dst, objs) = self.generate_msvc_pch_command(target, compiler, pch)
extradep = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
elif compiler.id == 'intel':
# Intel generates on target generation
continue
else:
src = os.path.join(self.build_to_src, target.get_source_subdir(), pch[0])
(commands, dep, dst, objs) = self.generate_gcc_pch_command(target, compiler, pch[0])
extradep = None
pch_objects += objs
rulename = compiler.get_language() + cstr + '_PCH'
elem = NinjaBuildElement(self.all_outputs, dst, rulename, src)
if extradep is not None:
elem.add_dep(extradep)
elem.add_item('ARGS', commands)
elem.add_item('DEPFILE', dep)
elem.write(outfile)
return pch_objects
def generate_shsym(self, outfile, target):
target_name = self.get_target_filename(target)
targetdir = self.get_target_private_dir(target)
symname = os.path.join(targetdir, target_name + '.symbols')
elem = NinjaBuildElement(self.all_outputs, symname, 'SHSYM', target_name)
if self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler():
elem.add_item('CROSS', '--cross-host=' + self.environment.cross_info.config['host_machine']['system'])
elem.write(outfile)
def get_cross_stdlib_link_args(self, target, linker):
if isinstance(target, build.StaticLibrary) or not target.is_cross:
return []
if not self.environment.cross_info.has_stdlib(linker.language):
return []
return linker.get_no_stdlib_link_args()
def get_target_type_link_args(self, target, linker):
abspath = os.path.join(self.environment.get_build_dir(), target.subdir)
commands = []
if isinstance(target, build.Executable):
# Currently only used with the Swift compiler to add '-emit-executable'
commands += linker.get_std_exe_link_args()
# If gui_app, and that's significant on this platform
if target.gui_app and hasattr(linker, 'get_gui_app_args'):
commands += linker.get_gui_app_args()
elif isinstance(target, build.SharedLibrary):
if isinstance(target, build.SharedModule):
commands += linker.get_std_shared_module_link_args()
else:
commands += linker.get_std_shared_lib_link_args()
# All shared libraries are PIC
commands += linker.get_pic_args()
# Add -Wl,-soname arguments on Linux, -install_name on OS X
commands += linker.get_soname_args(target.prefix, target.name, target.suffix,
abspath, target.soversion,
isinstance(target, build.SharedModule))
# This is only visited when building for Windows using either GCC or Visual Studio
if target.vs_module_defs and hasattr(linker, 'gen_vs_module_defs_args'):
commands += linker.gen_vs_module_defs_args(target.vs_module_defs.rel_to_builddir(self.build_to_src))
# This is only visited when building for Windows using either GCC or Visual Studio
if target.import_filename:
commands += linker.gen_import_library_args(os.path.join(target.subdir, target.import_filename))
elif isinstance(target, build.StaticLibrary):
commands += linker.get_std_link_args()
else:
raise RuntimeError('Unknown build target type.')
return commands
def get_link_whole_args(self, linker, target):
target_args = self.build_target_link_arguments(linker, target.link_whole_targets)
return linker.get_link_whole_for(target_args) if len(target_args) else []
def generate_link(self, target, outfile, outname, obj_list, linker, extra_args=[]):
if isinstance(target, build.StaticLibrary):
linker_base = 'STATIC'
else:
linker_base = linker.get_language() # Fixme.
if isinstance(target, build.SharedLibrary):
self.generate_shsym(outfile, target)
crstr = ''
if target.is_cross:
crstr = '_CROSS'
linker_rule = linker_base + crstr + '_LINKER'
# Create an empty commands list, and start adding link arguments from
# various sources in the order in which they must override each other
# starting from hard-coded defaults followed by build options and so on.
#
# Once all the linker options have been passed, we will start passing
# libraries and library paths from internal and external sources.
commands = CompilerArgs(linker)
# First, the trivial ones that are impossible to override.
#
# Add linker args for linking this target derived from 'base' build
# options passed on the command-line, in default_options, etc.
# These have the lowest priority.
if not isinstance(target, build.StaticLibrary):
commands += compilers.get_base_link_args(self.environment.coredata.base_options,
linker,
isinstance(target, build.SharedModule))
        # Add -nostdlib if needed; can't be overridden
commands += self.get_cross_stdlib_link_args(target, linker)
        # Add things like /NOLOGO; usually can't be overridden
commands += linker.get_linker_always_args()
# Add buildtype linker args: optimization level, etc.
commands += linker.get_buildtype_linker_args(self.get_option_for_target('buildtype', target))
# Add /DEBUG and the pdb filename when using MSVC
commands += self.get_link_debugfile_args(linker, target, outname)
# Add link args specific to this BuildTarget type, such as soname args,
# PIC, import library generation, etc.
commands += self.get_target_type_link_args(target, linker)
# Archives that are copied wholesale in the result. Must be before any
# other link targets so missing symbols from whole archives are found in those.
if not isinstance(target, build.StaticLibrary):
commands += self.get_link_whole_args(linker, target)
if not isinstance(target, build.StaticLibrary):
# Add link args added using add_project_link_arguments()
commands += self.build.get_project_link_args(linker, target.subproject)
# Add link args added using add_global_link_arguments()
# These override per-project link arguments
commands += self.build.get_global_link_args(linker)
if not target.is_cross:
# Link args added from the env: LDFLAGS. We want these to
# override all the defaults but not the per-target link args.
commands += self.environment.coredata.external_link_args[linker.get_language()]
# Now we will add libraries and library paths from various sources
# Add link args to link to all internal libraries (link_with:) and
# internal dependencies needed by this target.
if linker_base == 'STATIC':
# Link arguments of static libraries are not put in the command
# line of the library. They are instead appended to the command
# line where the static library is used.
dependencies = []
else:
dependencies = target.get_dependencies()
commands += self.build_target_link_arguments(linker, dependencies)
# For 'automagic' deps: Boost and GTest. Also dependency('threads').
# pkg-config puts the thread flags itself via `Cflags:`
for d in target.external_deps:
if d.need_threads():
commands += linker.thread_link_flags()
# Only non-static built targets need link args and link dependencies
if not isinstance(target, build.StaticLibrary):
commands += target.link_args
# External deps must be last because target link libraries may depend on them.
for dep in target.get_external_deps():
commands += dep.get_link_args()
for d in target.get_dependencies():
if isinstance(d, build.StaticLibrary):
for dep in d.get_external_deps():
commands += dep.get_link_args()
# Add link args for c_* or cpp_* build options. Currently this only
# adds c_winlibs and cpp_winlibs when building for Windows. This needs
# to be after all internal and external libraries so that unresolved
# symbols from those can be found here. This is needed when the
# *_winlibs that we want to link to are static mingw64 libraries.
commands += linker.get_option_link_args(self.environment.coredata.compiler_options)
# Set runtime-paths so we can run executables without needing to set
# LD_LIBRARY_PATH, etc in the environment. Doesn't work on Windows.
commands += linker.build_rpath_args(self.environment.get_build_dir(),
self.determine_rpath_dirs(target),
target.install_rpath)
# Add libraries generated by custom targets
custom_target_libraries = self.get_custom_target_provided_libraries(target)
commands += extra_args
commands += custom_target_libraries
# Convert from GCC-style link argument naming to the naming used by the
# current compiler.
commands = commands.to_native()
dep_targets = [self.get_dependency_filename(t) for t in dependencies]
dep_targets.extend([self.get_dependency_filename(t)
for t in target.link_depends])
elem = NinjaBuildElement(self.all_outputs, outname, linker_rule, obj_list)
elem.add_dep(dep_targets + custom_target_libraries)
elem.add_item('LINK_ARGS', commands)
return elem
def determine_rpath_dirs(self, target):
link_deps = target.get_all_link_deps()
result = []
for ld in link_deps:
prospective = self.get_target_dir(ld)
if prospective not in result:
result.append(prospective)
return result
def get_dependency_filename(self, t):
if isinstance(t, build.SharedLibrary):
return os.path.join(self.get_target_private_dir(t), self.get_target_filename(t) + '.symbols')
elif isinstance(t, mesonlib.File):
if t.is_built:
return t.relative_name()
else:
return t.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
return self.get_target_filename(t)
def generate_shlib_aliases(self, target, outdir):
aliases = target.get_aliases()
for alias, to in aliases.items():
aliasfile = os.path.join(self.environment.get_build_dir(), outdir, alias)
try:
os.remove(aliasfile)
except Exception:
pass
try:
os.symlink(to, aliasfile)
except NotImplementedError:
mlog.debug("Library versioning disabled because symlinks are not supported.")
except OSError:
mlog.debug("Library versioning disabled because we do not have symlink creation privileges.")
def generate_custom_target_clean(self, outfile, trees):
e = NinjaBuildElement(self.all_outputs, 'clean-ctlist', 'CUSTOM_COMMAND', 'PHONY')
d = CleanTrees(self.environment.get_build_dir(), trees)
d_file = os.path.join(self.environment.get_scratch_dir(), 'cleantrees.dat')
e.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'cleantrees', d_file])
e.add_item('description', 'Cleaning custom target directories.')
e.write(outfile)
# Write out the data file passed to the script
with open(d_file, 'wb') as ofile:
pickle.dump(d, ofile)
return 'clean-ctlist'
def generate_gcov_clean(self, outfile):
gcno_elem = NinjaBuildElement(self.all_outputs, 'clean-gcno', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcno_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcno'])
gcno_elem.add_item('description', 'Deleting gcno files.')
gcno_elem.write(outfile)
gcda_elem = NinjaBuildElement(self.all_outputs, 'clean-gcda', 'CUSTOM_COMMAND', 'PHONY')
script_root = self.environment.get_script_dir()
clean_script = os.path.join(script_root, 'delwithsuffix.py')
gcda_elem.add_item('COMMAND', [sys.executable, clean_script, '.', 'gcda'])
gcda_elem.add_item('description', 'Deleting gcda files.')
gcda_elem.write(outfile)
def get_user_option_args(self):
cmds = []
for (k, v) in self.environment.coredata.user_options.items():
cmds.append('-D' + k + '=' + (v.value if isinstance(v.value, str) else str(v.value).lower()))
# The order of these arguments must be the same between runs of Meson
# to ensure reproducible output. The order we pass them shouldn't
# affect behavior in any other way.
return sorted(cmds)
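    # Illustrative example (added note; option names and values are
    # hypothetical): user options {'buildtype': 'release', 'strip': True} would
    # produce the reproducibly ordered list
    #
    #   sorted(['-Dstrip=true', '-Dbuildtype=release'])
    #   # -> ['-Dbuildtype=release', '-Dstrip=true']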
def generate_dist(self, outfile):
elem = NinjaBuildElement(self.all_outputs, 'dist', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('DESC', 'Creating source packages')
elem.add_item('COMMAND', [sys.executable,
self.environment.get_build_command(),
'--internal', 'dist',
self.environment.source_dir,
self.environment.build_dir,
sys.executable,
self.environment.get_build_command()])
elem.add_item('pool', 'console')
elem.write(outfile)
# For things like scan-build and other helper tools we might have.
def generate_utils(self, outfile):
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'scanbuild', self.environment.source_dir, self.environment.build_dir,
sys.executable, self.environment.get_build_command()] + self.get_user_option_args()
elem = NinjaBuildElement(self.all_outputs, 'scan-build', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
cmd = [sys.executable, self.environment.get_build_command(),
'--internal', 'uninstall']
elem = NinjaBuildElement(self.all_outputs, 'uninstall', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', cmd)
elem.add_item('pool', 'console')
elem.write(outfile)
def generate_ending(self, outfile):
targetlist = []
for t in self.get_build_by_default_targets().values():
# Add the first output of each target to the 'all' target so that
# they are all built
targetlist.append(os.path.join(self.get_target_dir(t), t.get_outputs()[0]))
elem = NinjaBuildElement(self.all_outputs, 'all', 'phony', targetlist)
elem.write(outfile)
default = 'default all\n\n'
outfile.write(default)
ninja_command = environment.detect_ninja()
if ninja_command is None:
raise MesonException('Could not detect Ninja v1.6 or newer')
elem = NinjaBuildElement(self.all_outputs, 'clean', 'CUSTOM_COMMAND', 'PHONY')
elem.add_item('COMMAND', [ninja_command, '-t', 'clean'])
elem.add_item('description', 'Cleaning.')
# If we have custom targets in this project, add all their outputs to
# the list that is passed to the `cleantrees.py` script. The script
# will manually delete all custom_target outputs that are directories
# instead of files. This is needed because on platforms other than
# Windows, Ninja only deletes directories while cleaning if they are
# empty. https://github.com/mesonbuild/meson/issues/1220
ctlist = []
for t in self.build.get_targets().values():
if isinstance(t, build.CustomTarget):
# Create a list of all custom target outputs
for o in t.get_outputs():
ctlist.append(os.path.join(self.get_target_dir(t), o))
if ctlist:
elem.add_dep(self.generate_custom_target_clean(outfile, ctlist))
if 'b_coverage' in self.environment.coredata.base_options and \
self.environment.coredata.base_options['b_coverage'].value:
self.generate_gcov_clean(outfile)
elem.add_dep('clean-gcda')
elem.add_dep('clean-gcno')
elem.write(outfile)
deps = self.get_regen_filelist()
elem = NinjaBuildElement(self.all_outputs, 'build.ninja', 'REGENERATE_BUILD', deps)
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, 'reconfigure', 'REGENERATE_BUILD', 'PHONY')
elem.add_item('pool', 'console')
elem.write(outfile)
elem = NinjaBuildElement(self.all_outputs, deps, 'phony', '')
elem.write(outfile)
| apache-2.0 | 4,618,508,688,966,883,000 | 48.26108 | 132 | 0.579532 | false |
Tejal011089/med2-app | selling/utils/__init__.py | 1 | 63144 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import _, throw
from webnotes.utils import flt, cint
from webnotes.utils import load_json, nowdate, cstr
from webnotes.model.code import get_obj
from webnotes.model.doc import Document
from webnotes import msgprint
from webnotes.model.bean import getlist, copy_doclist
#from webnotes.model.code import get_obj
from webnotes.model.bean import getlist, copy_doclist
from datetime import datetime, timedelta,date
from webnotes.utils.email_lib import sendmail
import json
def get_customer_list(doctype, txt, searchfield, start, page_len, filters):
if webnotes.conn.get_default("cust_master_name") == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
return webnotes.conn.sql("""select %s from `tabCustomer` where docstatus < 2
and (%s like %s or customer_name like %s) order by
case when name like %s then 0 else 1 end,
case when customer_name like %s then 0 else 1 end,
name, customer_name limit %s, %s""" %
(", ".join(fields), searchfield, "%s", "%s", "%s", "%s", "%s", "%s"),
("%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, "%%%s%%" % txt, start, page_len))
def get_accessories(doctype, txt, searchfield, start, page_len, filters):
webnotes.errprint("ijn init ")
@webnotes.whitelist()
def get_item_details(args):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0
}
"""
if isinstance(args, basestring):
args = json.loads(args)
args = webnotes._dict(args)
if args.barcode:
args.item_code = _get_item_code(barcode=args.barcode)
elif not args.item_code and args.serial_no:
args.item_code = _get_item_code(serial_no=args.serial_no)
item_bean = webnotes.bean("Item", args.item_code)
_validate_item_details(args, item_bean.doc)
meta = webnotes.get_doctype(args.doctype)
#return args.item_code
# hack! for Sales Order Item
warehouse_fieldname = "warehouse"
if meta.get_field("reserved_warehouse", parentfield=args.parentfield):
warehouse_fieldname = "reserved_warehouse"
out = _get_basic_details(args, item_bean, warehouse_fieldname)
if meta.get_field("currency"):
out.base_ref_rate = out.basic_rate = out.ref_rate = out.export_rate = 0.0
if args.selling_price_list and args.price_list_currency:
out.update(_get_price_list_rate(args, item_bean, meta))
out.update(_get_item_discount(out.item_group, args.customer))
if out.get(warehouse_fieldname):
out.update(get_available_qty(args.item_code, out.get(warehouse_fieldname)))
out.customer_item_code = _get_customer_item_code(args, item_bean)
if cint(args.is_pos):
pos_settings = get_pos_settings(args.company)
if pos_settings:
out.update(apply_pos_settings(pos_settings, out))
if args.doctype in ("Sales Invoice", "Delivery Note"):
if item_bean.doc.has_serial_no == "Yes" and not args.serial_no:
out.serial_no = _get_serial_nos_by_fifo(args, item_bean)
# accessories= webnotes.conn.sql(""" select item_code from `tabAccessories`
# where parent='%s'"""%args.item_code,as_list=1)
# if accessories:
# return out, accessories
# else:
return out
@webnotes.whitelist()
def get_accssories_details(args):
if isinstance(args, basestring):
args = json.loads(args)
args = webnotes._dict(args)
accessories= webnotes.conn.sql(""" select item_code from `tabAccessories`
where parent='%s'"""%args.item_code,as_list=1)
if accessories:
return accessories
else:
return ''
def _get_serial_nos_by_fifo(args, item_bean):
return "\n".join(webnotes.conn.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s and status='Available'
order by timestamp(purchase_date, purchase_time) asc limit %(qty)s""", {
"item_code": args.item_code,
"warehouse": args.warehouse,
"qty": cint(args.qty)
}))
def _get_item_code(barcode=None, serial_no=None):
if barcode:
input_type = "Barcode"
item_code = webnotes.conn.sql_list("""select name from `tabItem` where barcode=%s""", barcode)
elif serial_no:
input_type = "Serial No"
item_code = webnotes.conn.sql_list("""select item_code from `tabSerial No`
where name=%s""", serial_no)
if not item_code:
throw(_("No Item found with ") + input_type + ": %s" % (barcode or serial_no))
return item_code[0]
def _validate_item_details(args, item):
from utilities.transaction_base import validate_item_fetch
validate_item_fetch(args, item)
# validate if sales item or service item
if args.order_type == "Maintenance":
if item.is_service_item != "Yes":
throw(_("Item") + (" %s: " % item.name) +
_("not a service item.") +
_("Please select a service item or change the order type to Sales."))
elif item.is_sales_item != "Yes":
throw(_("Item") + (" %s: " % item.name) + _("not a sales item"))
def _get_basic_details(args, item_bean, warehouse_fieldname):
item = item_bean.doc
from webnotes.defaults import get_user_default_as_list
user_default_warehouse_list = get_user_default_as_list('warehouse')
user_default_warehouse = user_default_warehouse_list[0] \
if len(user_default_warehouse_list)==1 else ""
out = webnotes._dict({
"item_code": item.name,
"description": item.description_html or item.description,
warehouse_fieldname: user_default_warehouse or item.default_warehouse \
or args.get(warehouse_fieldname),
"income_account": item.default_income_account or args.income_account \
or webnotes.conn.get_value("Company", args.company, "default_income_account"),
"expense_account": item.purchase_account or args.expense_account \
or webnotes.conn.get_value("Company", args.company, "default_expense_account"),
"cost_center": item.default_sales_cost_center or args.cost_center,
"qty": 1.0,
"export_amount": 0.0,
"amount": 0.0,
"batch_no": None,
"item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in
item_bean.doclist.get({"parentfield": "item_tax"})))),
})
for fieldname in ("item_name", "item_group", "barcode", "brand", "stock_uom"):
out[fieldname] = item.fields.get(fieldname)
return out
def _get_price_list_rate(args, item_bean, meta):
ref_rate = webnotes.conn.sql("""select ref_rate from `tabItem Price`
where price_list=%s and item_code=%s and selling=1""",
(args.selling_price_list, args.item_code), as_dict=1)
if not ref_rate:
return {}
# found price list rate - now we can validate
from utilities.transaction_base import validate_currency
validate_currency(args, item_bean.doc, meta)
return {"ref_rate": flt(ref_rate[0].ref_rate) * flt(args.plc_conversion_rate) / flt(args.conversion_rate)}
def _get_item_discount(item_group, customer):
parent_item_groups = [x[0] for x in webnotes.conn.sql("""SELECT parent.name
FROM `tabItem Group` AS node, `tabItem Group` AS parent
WHERE parent.lft <= node.lft and parent.rgt >= node.rgt and node.name = %s
GROUP BY parent.name
ORDER BY parent.lft desc""", (item_group,))]
discount = 0
for d in parent_item_groups:
res = webnotes.conn.sql("""select discount, name from `tabCustomer Discount`
where parent = %s and item_group = %s""", (customer, d))
if res:
discount = flt(res[0][0])
break
return {"adj_rate": discount}
def send_sms(msg,sender_no):
ss = get_obj('SMS Settings', 'SMS Settings', with_children=1)
webnotes.errprint("In send SMS ")
webnotes.errprint(ss)
#return ss
args = {}
#msg="Ticket Created"
for d in getlist(ss.doclist, 'static_parameter_details'):
args[d.parameter] = d.value
sms_url=webnotes.conn.get_value('SMS Settings', None, 'sms_gateway_url')
msg_parameter=webnotes.conn.get_value('SMS Settings', None, 'message_parameter')
receiver_parameter=webnotes.conn.get_value('SMS Settings', None, 'receiver_parameter')
url = sms_url +"?username="+ args["username"] +"&password="+args["password"]+"&sendername="+ args["sendername"] +"&mobileno="+ sender_no +"&message=" + msg
webnotes.errprint(url)
import requests
r = requests.get(url)
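# Illustrative usage (hypothetical message and number; the gateway URL and
# credentials come from the SMS Settings doctype). Calling this would really
# hit the configured gateway, so it is only a sketch:
def _send_sms_usage_example():
	return send_sms("Dear Customer, your ticket has been created", "919800000000")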
def send_email(email,msg):
webnotes.errprint("in email")
#webnotes.msgprint(email)
from webnotes.utils.email_lib import sendmail
sendmail(email, subject="Payment Due Details", msg = msg)
@webnotes.whitelist()
def get_available_qty(item_code, warehouse):
return webnotes.conn.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["projected_qty", "actual_qty"], as_dict=True) or {}
def _get_customer_item_code(args, item_bean):
customer_item_code = item_bean.doclist.get({"parentfield": "item_customer_details",
"customer_name": args.customer})
return customer_item_code and customer_item_code[0].ref_code or None
def get_pos_settings(company):
pos_settings = webnotes.conn.sql("""select * from `tabPOS Setting` where user = %s
and company = %s""", (webnotes.session['user'], company), as_dict=1)
if not pos_settings:
pos_settings = webnotes.conn.sql("""select * from `tabPOS Setting`
where ifnull(user,'') = '' and company = %s""", company, as_dict=1)
return pos_settings and pos_settings[0] or None
def apply_pos_settings(pos_settings, opts):
out = {}
for fieldname in ("income_account", "cost_center", "warehouse", "expense_account"):
if not opts.get(fieldname):
out[fieldname] = pos_settings.get(fieldname)
if out.get("warehouse"):
out["actual_qty"] = get_available_qty(opts.item_code, out.get("warehouse")).get("actual_qty")
return out
@webnotes.whitelist(allow_guest=True)
def get_installation_note(customer,emp_id,_type='POST'):
#return "hello "+customer
qr="select customer_name from `tabCustomer` where customer_name="+customer+" "
res=webnotes.conn.sql(qr)
#return res
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
qry="select name from `tabFiscal Year` where is_fiscal_year_closed='No'"
res1=webnotes.conn.sql(qry)
#return res1[0][0]
from webnotes.model.doc import Document
import time
if res :
d= Document('Installation Note')
d.customer=customer[1:-1]
d.customer_name=customer[1:-1]
d.inst_time=time.strftime("%H:%M:%S")
d.inst_date=today
d.employee_id=emp_id[1:-1]
#return d.employee_id
d.fiscal_year=res1[0][0]
d.company='medsynaptic'
d.territory='India'
d.customer_group='Individual'
#return d.fiscal_year
d.save()
webnotes.conn.commit()
return d.name
else:
d= Document('Customer')
d.customer_name=customer[1:-1]
d.customer_type='Individual'
d.customer_group='Individual'
d.territory='India'
d.save()
webnotes.conn.commit()
c= Document('Installation Note')
c.customer=customer[1:-1]
c.inst_time=time.strftime("%H:%M:%S")
c.inst_date=today
c.fiscal_year=res1[0][0]
c.employee_id=emp_id[1:-1]
c.company='Medsynaptic'
c.territory='India'
c.customer_group='Individual'
c.save()
webnotes.conn.commit()
return c.name
@webnotes.whitelist(allow_guest=True)
def get_customer_issue(installationname,sender_no,message,_type='POST'):
#return installationname[1:-1]
#sender_no1=sender_no[-11:]
qr="select customer,employee_id from `tabInstallation Note` where name='"+installationname[1:-1]+"' "
res=webnotes.conn.sql(qr)
#return qr
x="select customer_name from `tabCustomer` where customer_no='"+sender_no[1:-1]+"' "
y=webnotes.conn.sql(x)
#return x
m= None
if not y :
z="select user_id from `tabEmployee` where cell_number="+sender_no[1:-1]+""
m=webnotes.conn.sql(z)
#return m
w="select status,user_id from `tabEmployee` where name='%s'"%(res[0][1]);
t=webnotes.conn.sql(w)
#return t
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
qry="select name from `tabFiscal Year` where is_fiscal_year_closed='No'"
res1=webnotes.conn.sql(qry)
q=" select territory from `tabCustomer` where name='%s'"%(res[0][0]);
r=webnotes.conn.sql(q)
w="select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = '%s' and defvalue = '%s' and r.role='Manager'"%('territory',r[0][0])
a=webnotes.conn.sql(w)
#return a
from webnotes.model.doc import Document
import time
#if res :
d = Document('Support Ticket')
d.opening_time=time.strftime("%H:%M:%S")
if y:
d.raised_by=y[0][0]
elif m:
		d.raised_by=m[0][0]
else:
d.raised_by=sender_no[-11:]
d.subject=installationname[1:-1]
d.customer_name=res[0][0]
d.customer=res[0][0]
d.territory=r[0][0]
d.status='Open'
#d.customer_group='Individual'
d.opening_date=today
#d.fiscal_year=res1[0][0]
d.company='medsynaptic'
d.territory=r[0][0]
#d.raised_by=res[0][1]
if t[0][0] =='Active':
#return t[0][1]
d.assigned_to=t[0][1]
d.assigned_to_higher_level=a[0][0]
else:
d.assigned_to=a[0][0]
d.assigned_to_higher_level=a[0][0]
#d.assigned_to_higher_level=a[0][0]
#return d.fiscal_year
d.save()
webnotes.conn.commit()
#return sender_no[1:-1]
p=send_sms(message[1:-1],sender_no[1:-1])
return d.name
#else:
#d= Document('Customer')
#d.customer_name=customer[1:-1]
#d.customer_group='Individual'
#d.customer_name=customer[1:-1]
#d.territory='India'
#d.save()
#webnotes.conn.commit()
#c= Document('Installation Note')
#c.inst_time=time.strftime("%H:%M:%S")
#c.inst_date=today
#c.customer=customer[1:-1]
#c.customer_name=customer[1:-1]
#c.complaint=complaint[1:-1]
#c.status='Open'
#c.complaint_date=today
#c.fiscal_year=res1[0][0]
#c.company='medsynaptic'
#c.territory='India'
#c.complaint_raised_by=customer[1:-1]
#c.save()
#webnotes.conn.commit()
#return c.name
@webnotes.whitelist(allow_guest=True)
def get_support_ticket(code,sender_no,message,_type='POST'):
#return "hello"
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
from webnotes.model.doc import Document
import time
#return sender_no[1:-1]
if code[1:-1] =="CRT":
#return "hello"
#return sender_no[1:-1]
msg="Dear Customer,According to your request ticket is created"
d= Document('Support Ticket')
d.opening_time=time.strftime("%H:%M:%S")
d.opening_date=today
d.subject=message[1:-1]
d.raised_by=sender_no[1:-1]
d.company='medsynaptic'
d.status='Open'
d.save()
webnotes.conn.commit()
p=send_sms(msg,sender_no[1:-1])
return d.name
elif code[1:-1]=="CLS":
#return "hii"
#msg="Ticket Closed"
#sender_no1=sender_no[-11:]
z="select name from `tabSupport Ticket` where raised_by="+sender_no[1:-1]+" and status='Open'"
x=webnotes.conn.sql(z)
#return x
msg="Dear Customer,according to your request respective ticket is closed"
if x:
g="update `tabSupport Ticket` set status='Closed' where name='%s'"%(x[0][0])
h=webnotes.conn.sql(g)
webnotes.conn.sql("commit")
e=send_sms(msg,sender_no[1:-1])
#webnotes.er
return "Updated"
else:
pass
else:
pass
@webnotes.whitelist(allow_guest=True)
def get_activity_data(code,emp_id,client_name,place,deal_amount,product_sold=None,barcode=None,IR_NO=None,phone_no=None,payment_type=None,payment_mode=None,cheque_no=None,bank=None,cheque_status=None,service_call_type=None):
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
from webnotes.model.doc import Document
import time
#return code
if (code[1:-1] =="SLD" or code =="SLO") and product_sold :
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.product_name=product_sold[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1] =="INND" or code[1:-1] =="INNO" or code[1:1] =="INU") and barcode and IR_NO :
#return barcode
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.ir_no=IR_NO[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="AMCD" or code[1:-1]=="AMCO") and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.ir_no=IR_NO[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="SED" or code[1:-1]=="SEO") and service_call_type and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
d.service_call_type=service_call_type[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif code[1:-1]=="PR" and payment_type and payment_mode and cheque_no and bank and cheque_status and barcode:
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.service_call_type=service_call_type[1:-1]
d.payment_type=payment_type[1:-1]
d.payment_mode=payment_mode[1:-1]
d.cheque_no=cheque_no[1:-1]
d.cheque_bank=bank[1:-1]
d.cheque_status=cheque_status[1:-1]
d.barcode=barcode[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
d.save()
webnotes.conn.commit()
return d.name
elif (code[1:-1]=="DC") and phone_no and product_sold:
#return phone_no[-11:]
d=Document('Activity Data')
d.activity_id=d.name
d.activity_type=code[1:-1]
d.emp_id=emp_id[1:-1]
d.client_name=client_name[1:-1]
d.place=place[1:-1]
d.activity_date=today
#d.service_call_type=service_call_type[1:-1]
d.product_name=product_sold[1:-1]
d.activity_time=time.strftime("%H:%M:%S")
d.amount=deal_amount[1:-1]
c=phone_no[-11:]
d.phone_no=c[1:-1]
d.save()
webnotes.conn.commit()
return d.name
else:
"Last"
@webnotes.whitelist(allow_guest=True)
def get_escalation_for_supportticket(_type='Post'):
#print "get esc"
#val = ''
from webnotes.utils import cstr
aa="select distinct(subdate(CURDATE(), 1)) from `tabHoliday` where subdate(CURDATE(), 1) not in (select holiday_date from `tabHoliday` where parent='2014-2015/Maharashtra/001')"
res=webnotes.conn.sql(aa)
s=Document('Support Ticket')
j=0
#print res
if res:
#print "in res "
for i in range (2,15):
#print "i"
bb="select distinct(subdate(CURDATE(), "+cstr(i)+")) from `tabHoliday`"
#print bb
res1=webnotes.conn.sql(bb)
if res1:
cc="select distinct(subdate(CURDATE(), 1)) from `tabHoliday` where '"+cstr(res1[0][0])+"' in (select holiday_date from `tabHoliday` where parent='2014-2015/Maharashtra/001')"
#print cc
res2=webnotes.conn.sql(cc)
if res2:
#print "old j"
#print j
j=j+24
#print "new j"
#print j
else:
print "breaning "
break
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
qry1="select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 24+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 48+"+cstr(j)+" HOUR)"
#print qry1
qry=webnotes.conn.sql(qry1,as_list=1);
webnotes.errprint("in 24 "+cstr(qry))
if qry:
for [k] in qry:
s=Document('Support Ticket')
webnotes.errprint(k)
p=webnotes.conn.sql("select territory from `tabSupport Ticket` where name='"+k+"'")
#webnotes.errprint(p)
w=webnotes.conn.sql("select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = '%s' and defvalue = '%s' and r.role='Manager' and y.parent=p.name and r.parent=p.name"%('territory',p[0][0]))
#webnotes.errprint(w[0][0])
ee="update `tabSupport Ticket` set assigned_to='',assigned_to_higher_level='"+cstr(w[0][0])+"' where name='"+cstr(k)+"'"
#print ee
webnotes.conn.sql(ee)
webnotes.conn.commit()
#msg1 = ""
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(k)+"'")
if flg[0][0]=="not":
em=w[0][0]
msg9="Support Ticket '"+k+"' assigned to you...Please check it."
sendmail(em, subject='Support Ticket Alert', msg = msg9)
ss="update `tabSupport Ticket` set flag='fst' where name ='"+cstr(k)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
qr=webnotes.conn.sql("select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 48+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 72+"+cstr(j)+" HOUR)",as_list=1)
webnotes.errprint("in 48 "+cstr(qr))
if qr:
for [l] in qr:
webnotes.errprint(l)
q=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='National Manager' and r.parent=p.name")
#print q
ff="update `tabSupport Ticket` set assigned_to='',assigned_to_higher_level='"+cstr(q[0][0])+"' where name='"+cstr(l)+"'"
#print ff
webnotes.conn.sql(ff)
webnotes.conn.commit()
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(l)+"'")
if flg[0][0]=="fst":
msg10="Support Ticket '"+l+"' assigned to you...Please check it."
em=q[0][0]
sendmail(em, subject='Support Ticket Alert', msg = msg10)
ss="update `tabSupport Ticket` set flag='snd' where name ='"+cstr(l)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
qs=webnotes.conn.sql("select name from `tabSupport Ticket` t where t.status='Open' and t.creation < DATE_SUB(NOW(), INTERVAL 72+"+cstr(j)+" HOUR) AND t.creation > DATE_SUB(NOW(), INTERVAL 100+"+cstr(j)+" HOUR)",as_list=1);
webnotes.errprint("in 72 "+cstr(qs))
if qs:
for [m] in qs:
s=Document('Support Ticket')
webnotes.errprint(m)
qa=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='COO' and r.parent=p.name")
qd=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='CEO' and r.parent=p.name")
qtt=webnotes.conn.sql("update `tabSupport Ticket` set assigned_to='"+qa[0][0]+"',assigned_to_higher_level= '"+qd[0][0]+"' where name='"+m+"'")
webnotes.conn.commit()
webnotes.errprint("Updated")
flg = webnotes.conn.sql("select flag from `tabSupport Ticket` where name ='"+cstr(m)+"'")
if flg[0][0]=="snd":
msg11="Hello, Support Ticket '"+m+"' assigned to you...Please check it."
em=qa[0][0]+","+qd[0][0]
sendmail(em, subject='Support Ticket Alert', msg = msg11)
ss="update `tabSupport Ticket` set flag='thrd' where name ='"+cstr(m)+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
@webnotes.whitelist(allow_guest=True)
def get_payment_followup():
#from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
#from import datetime,date,timedelta
i = datetime.now()
p=i.strftime('%Y-%m-%d')
webnotes.errprint(p)
qry=webnotes.conn.sql("select name from `tabSales Invoice` where outstanding_amount>0",as_list=1)
for [i] in qry:
qr=webnotes.conn.sql("select installation from `tabSales Invoice` where name='"+i+"'",as_list=1)
# webnotes.errprint(qr)
if qr:
q=webnotes.conn.sql("select inst_date,employee_id from `tabInstallation Note` where name='"+qr[0][0]+"'")
#webnotes.errprint([q,"qqqq"])
# webnotes.errprint(q[0][1])
y=webnotes.conn.sql("select grand_total_export from `tabSales Invoice` where name='"+qry[0][0]+"'",as_list=1)
# webnotes.errprint(y)
v=webnotes.conn.sql("select outstanding_amount,customer from `tabSales Invoice` where name='"+qry[0][0]+"'",as_list=1)
# webnotes.errprint(v)
paid=flt(y[0][0]-v[0][0])
if v:
customer_type=webnotes.conn.get_value('Customer',v[0][1],'customer_type')
if customer_type=='OEM':
credit_days=webnotes.conn.get_value('Customer',v[0][1],'credit_days')
elif customer_type:
credit_days=webnotes.conn.get_value('Global Defaults',None,'customer_credit_days')
if not credit_days:
credit_days=0
#webnotes.errprint(["credit_days is here",credit_days])
if q:
webnotes.errprint(q)
s=q[0][0].strftime('%Y-%m-%d')
a=getdate(p)
e=cint((getdate(p) - getdate(s)).days)
if e== cint(credit_days):
webnotes.errprint("in e")
z=webnotes.conn.sql("select cell_number,user_id from `tabEmployee` where name='"+q[0][1]+"'")
webnotes.errprint(z)
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='Manager' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='Manager'")
webnotes.errprint(qq)
dic1={
'Sales Invoice No':qry[0][0],
'Installation Date':s,
'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date': paid
}
#webnotes.errprint(flt(y[0][0]))
msg="Dear Sir,sales Invoice No= '"+qry[0][0]+"' ,Installation Date='"+s+"',Total Amount for specified Sales Invoice is='"+cstr(y[0][0])+"', And Outstanding Amount='"+cstr(v[0][0])+"',And Paid Amount Till Date='"+cstr(paid)+"' "
webnotes.errprint(msg)
						# send_sms expects (msg, number); send_email expects (email, msg)
						p=send_sms(msg,z[0][0])
						q=send_sms(msg,qq[0][0])
						r=send_email(z[0][1],msg)
						s=send_email(ss[0][0],msg)
						#x=send_email(z[0][1],msg)
#webnotes.errprint(qry[0][0])
elif e== 22+cint(credit_days):
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='National Manager' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='National Manager'",as_list=1)
#webnotes.errprint(qq)
dic1={
'Sales Invoice No':qry[0][0],
'Installation Date':s,
							'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date':paid
}
msg ="Dear Sir,sales Invoice No= '"+qry[0][0]+"' ,Installation Date='"+s+"', Total Amount for specified sales Invoice is='"+cstr(y[0][0])+"',And Outstanding Amount='"+cstr(v[0][0])+"',And Paid Amount Till Date='"+cstr(paid)+"' "
						p=send_sms(msg,qq[0][0])
q=send_email(ss[0][0],msg)
elif e>= 52+cint(credit_days):
ss=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='CEO' and r.parent=p.name")
webnotes.errprint(ss)
if ss:
qq=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='CEO'",as_list=1)
webnotes.errprint(qq)
ss1=webnotes.conn.sql("Select p.name from `tabProfile` p, `tabUserRole` r where r.role='COO' and r.parent=p.name")
webnotes.errprint(ss1)
if ss1:
qq1=webnotes.conn.sql("select cell_number from `tabEmployee` where user_id='"+ss[0][0]+"' and designation='COO'",as_list=1)
webnotes.errprint(qq1)
dic1={
'Sales Invoice No':qry[0][0],
'Installation Date':s,
								'Grand Total':y[0][0],
'Outstanding Amount':v[0][0],
'Paid Amount Till date':paid
}
msg="Dear Sir,sales Invoice No= '"+qry[0][0]+"' ,Installation Date='"+s+"',Total Amount fro specified invoice is='"+cstr(y[0][0])+"',And Outstanding Amount='"+cstr(v[0][0])+"',And Paid Amount Till Date='"+cstr(paid)+"' "
p=send_sms(qq[0][0],msg)
a=send_sms(qq1[0][0],msg)
r=send_email(ss[0][0],msg)
q=send_email(ss1[0][0],msg)
else:
webnotes.errprint("in last")
@webnotes.whitelist(allow_guest=True)
def fetch_sms(_type='POST'):
aa="select id,creation,message_body,sender_no from smslog where flag=0 and sender_no is not null and message_body like '#%#'"
bb=webnotes.conn.sql(aa)
from webnotes.model.doc import Document
import datetime,time
from webnotes.utils import now,get_first_day, get_last_day, add_to_date, nowdate, getdate
#print bb
for r in bb:
cc=r[2].split(',')
dd=cc[0].upper().replace(' ','')
#print cc
#print len(cc)
if dd=='#INNO' or dd=='#INND' or dd=='#INU':
if len(cc)==7:
#print "creation "+cstr( r)+"IN"
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[2]
d.client_name=cc[4]
d.place=cc[5]
d.activity_date=now()
d.ir_no=cc[1]
d.barcode=cc[3]
e=now().split(' ')
#d.activity_time=e[1]
d.amount=cc[6].replace('#','').replace(' ','')
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
#print d.name
webnotes.conn.commit()
elif dd=='#CRT' or dd=='#CLS':
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
today = nowdate()
import time
if dd=='#CRT' and len(cc)==3:
print "crt "+cstr(r) +"CRT CLD"
qr="select customer,employee_id from `tabInstallation Note` where product_barcode='"+cc[1]+"' "
print qr
res=webnotes.conn.sql(qr)
print res
g=t=a=''
if res:
print "in if"
gg="select name,customer_name,territory from tabCustomer where name='"+res[0][0]+"'"
print gg
g=webnotes.conn.sql(gg)
print g
w="select status,user_id from `tabEmployee` where name='%s'"%(res[0][1]);
print w
t=webnotes.conn.sql(w)
print t
print "for employe"
w="select y.parent from `tabDefaultValue` y,`tabProfile` p, `tabUserRole` r where defkey = 'territory' and defvalue = '"+g[0][2]+"' and r.role='Manager' and y.parent=p.name and r.parent=p.name"
print w
a=webnotes.conn.sql(w)
d=Document('Support Ticket')
d.subject=cc[1]
d.status='Open'
#if res:
if g:
d.territory=g and g[0][2] or ''
d.customer_name=g and g[0][1] or ''
d.customer=g and g[0][0] or ''
d.raised_by=r[3]
d.opening_date=nowdate()
#e=now().split(' ')
if t:
if t[0][0] =='Left':
d.assigned_to=a[0][0]
d.assigned_to_higher_level=a[0][0]
#return t[0][1]
else:
d.assigned_to=t[0][1]
d.assigned_to_higher_level=a[0][0]
#e=now().split(' ')
#d.sender_phone_no=r[3]
#d.activity_time='01:01:01'
d.save(new=1)
webnotes.conn.commit()
print d.name
flg=webnotes.conn.sql("select flag from `tabSupport Ticket` where name = '"+d.name+"'")
#print flg
if flg[0][0]=="nott":
msg8="Hello, Support Ticket '"+d.name+"' assigned to you...Please check it."
print msg8
em=t[0][1]+","+a[0][0]
print em
sendmail(em, subject='Support Ticket Alert', msg = msg8)
ss="update `tabSupport Ticket` set flag='not' where name = '"+d.name+"'"
webnotes.conn.sql(ss)
webnotes.conn.commit()
if d.name:
p=Document('Communication')
p.parent=d.name
p.parentfield='Communications'
p.parenttype='Support Ticket'
p.content=cc[2].replace('#','')
p.subject=cc[1]
p.sender = d.raised_by
p.save(new=1)
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=='#CLS' and len(cc)==2:
if len(cc)==2:
d=cc[1]
#print d[:-1]
#print "cls "+cstr(r)
msgg="Dear Customer,according to your request respective ticket is closed."
ee="update `tabSupport Ticket` set status='Closed' where name='"+cstr(d[:-1])+"'"
print ee
e="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
print e
print r
webnotes.conn.sql(ee)
webnotes.conn.sql(e)
webnotes.conn.commit()
no1=r[3]
no = no1.replace("+", "")
webnotes.errprint(no)
print "END SMS..."
pp=send_sms(msgg,no)
elif dd=='#SLD' or dd=='#SLO':
#print len(cc)
if len(cc)==6 :
print cc
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[2]
d.place=cc[3]
d.sender_phone_no=r[3]
d.activity_date=now()
d.product_name=cc[4]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[5].replace('#','')
d.save(new=1)
webnotes.conn.commit()
#print d.name
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=='#AMCD' or dd=='#AMCO' :
if len(cc)==6:
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
#d.ir_no=IR_NO[1:-1]
d.barcode=cc[2]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[5]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name :
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
webnotes.conn.commit()
elif dd=="#SED" or dd=="#SEO" :
if len(cc)==6 :
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
d.service_call_type=cc[5].replace('#','')
d.barcode=cc[2]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
elif dd=="#PR":
if len(cc)== 11:
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[3]
d.place=cc[4]
d.activity_date=now()
#d.service_call_type=service_call_type[1:-1]
d.payment_type=cc[5]
d.payment_mode=cc[7]
d.cheque_no=cc[8]
d.cheque_bank=cc[9]
d.cheque_status=cc[10].replace('#','')
d.barcode=cc[2]
#d.activity_time=time.strftime("%H:%M:%S")
d.amount=cc[6]
d.sender_phone_no=r[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
elif dd=="#DC":
#print "creation for dc need 6 fields "+cstr(cc)
if len(cc)==6:
#return phone_no[-11:]
d=Document('Activity Data')
#d.activity_id=d.name
d.activity_type=dd[1:]
d.emp_id=cc[1]
d.client_name=cc[2]
d.place=cc[4]
d.activity_date=now()
d.sender_phone_no=r[3]
#d.service_call_type=service_call_type[1:-1]
d.product_name=cc[5].replace('#','')
#d.activity_time=time.strftime("%H:%M:%S")
#d.amount=deal_amount[1:-1]
d.phone_no=cc[3]
d.save(new=1)
webnotes.conn.commit()
f=Document('Activity Data',d.name)
f.activity_id=d.name
f.save()
if d.name:
ee="update smslog set flag=1 where id='"+cstr(r[0])+"' and flag=0"
g=webnotes.conn.sql(ee)
print d.name
webnotes.conn.commit()
@webnotes.whitelist(allow_guest=True)
def posting():
from werkzeug.wrappers import Request, Response
return request.form['username']
#return "hi"
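# get_post() deserializes the JSON payload passed in `data` into a new
# 'Installation Note' document, copies the submitted fields onto it, saves
# it and returns the generated document name.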
@webnotes.whitelist(allow_guest=True)
def get_post(data,_type='POST'):
from webnotes.utils import get_first_day, get_last_day, add_to_date, nowdate, getdate
from webnotes.model.doc import Document
import time
abc=json.loads(data)
aa=Document('Installation Note')
aa.customer=abc['customer_id']
aa.customer_address=abc['address']
aa.address_display=abc['address']
aa.contact_person=abc['contact_person']
aa.employee_id=abc['employee_no']
aa.internal_order_no=abc['iof_no']
aa.contact_email=abc['email']
aa.contact_mobile=abc['phone']
aa.clinic_name=abc['clinic_name']
aa.doctor_name=abc['doctor_name']
aa.city=abc['city']
aa.pincode=abc['pincode']
aa.director_name=abc['director_name']
aa.state=abc['state']
aa.reg_no_clinic=abc['reg_no_clinic']
aa.reg_no_doctor=abc['reg_no_doctor']
aa.website=abc['website']
aa.palce=abc['palce']
#aa.inst_date=abc['date_of_installation'].strftime('%Y-%m-%d')
aa.employee_name=abc['employee_name']
aa.inst_reprot_no=abc['inst_reprot_no']
aa.user_name=abc['user_name']
aa.dept=abc['dept']
aa.contact_mobile=abc['contact_no']
aa.dept1=abc['dept1']
aa.contact_no1=abc['contact_no1']
aa.product_barcode=abc['product_barcode']
aa.version=abc['version']
aa.material_supplied=abc['material_supplied']
aa.inst_start_time=abc['inst_start_time']
aa.inst_date=abc['inst_date']
aa.inst_end_time=abc['inst_end_time']
aa.inst_end_date=abc['inst_end_date']
aa.proc=abc['proc']
aa.ram=abc['ram']
aa.hdd=abc['hdd']
aa.me=abc['me']
aa.other=abc['other']
aa.model_no=abc['model_no']
aa.serial_no=abc['serial_no']
aa.os=abc['os']
aa.inst_type=abc['inst_type']
aa.no_in_case=abc['no_in_case']
aa.training=abc['training']
aa.customer_remark=abc['customer_remark']
aa.engineers_remark=abc['engineers_remark']
aa.status1=abc['status']
aa.signature=abc['signature']
aa.sign_seal=abc['sign_seal']
aa.save(new=1)
webnotes.conn.commit()
return aa.name
@webnotes.whitelist(allow_guest=True)
def get_customer_detail(customer_id):
qr="select customer_no,email from tabCustomer where name="+customer_id
res=webnotes.conn.sql(qr)
customerobj= {}
for r in res:
customerobj['phone'] = r[0]
customerobj['email'] = r[1]
customerobj['clinic_name'] = ''
customerobj['address'] = ''
customerobj['doctor_name'] = ''
customerobj['city'] = ''
customerobj['pincode'] = ''
customerobj['director_name'] = ''
customerobj['state'] = ''
customerobj['email'] = ''
customerobj['reg_no_clinic'] = ''
customerobj['reg_no_doctor'] = ''
customerobj['website'] = ''
return customerobj
@webnotes.whitelist(allow_guest=True)
def get_item_detail(barcode):
qr="select name,item_code,description from `tabSerial No` limit 5"
res=webnotes.conn.sql(qr)
itemsobj= {}
itemlist = []
for r in res:
itemobj={}
itemobj['barcode'] = r[0]
itemobj['description'] = r[1]
itemobj['details'] = r[2]
itemlist.append(itemobj)
return itemlist
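# The send_*_details() alert helpers below follow a common pattern: build an
# HTML table of matching records (for most of them, grouped per territory
# with a lookup of that territory's 'Regional Manager' profile), e-mail the
# result to a hard-coded recipient address, and return "done".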
@webnotes.whitelist(allow_guest=True)
def send_sales_details():
print "sales details"
from webnotes.utils.email_lib import sendmail
qr="select a.territory,b.item_code,sum(b.qty) as qty,sum(b.export_amount) as amt from `tabSales Order Item` b,`tabSales Order` a where a.name=b.parent group by b.item_code"
res=webnotes.conn.sql(qr)
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Product</td> <td>Quantity</td><td>Total Amount</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
for rr in res:
msg1=''
bb="select ifnull(a.territory,''),ifnull(b.item_code,''),ifnull(sum(b.qty),''),ifnull(sum(b.export_amount),'') from `tabSales Order Item` b,`tabSales Order` a where DATE(a.creation)=CURDATE() and a.name=b.parent and a.territory='"+rr[0]+"' group by b.item_code "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
#print "------------------- region"
#print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
sendmail('[email protected]', subject='Regional Sales Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
if res1:
sendmail('[email protected]', subject="sales alert", msg = msg3)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_ticket_details():
print "ticket"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Total Tickets Created</td> <td>Total Tickets Closed</td><td>Total Open Tickets</td><td>Total Paid Tickets</td><td>Total Paid Tickets Amount</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSupport Ticket` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="SELECT ifnull(a.territory,''),count(a.name),(select count(a.name) FROM `tabSupport Ticket` a WHERE DATE(a.creation)=CURDATE() and a.territory='"+cstr(rr[0])+"' and a.status='Closed' group by a.territory),(select count(a.name) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.status<>'Closed' group by a.territory),(select count(a.name) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.is_paid='Yes' group by a.territory),(select sum(amount) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' and a.is_paid='Yes' group by a.territory) FROM `tabSupport Ticket` a WHERE a.territory='"+cstr(rr[0])+"' group by a.territory "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
#print "------------------- region"
#print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
sendmail('[email protected]', subject='Regional Support Ticket Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
if res1:
sendmail('[email protected]', subject="Support Ticket Alert", msg = msg3)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_isbpl_details():
print "item sold below pricelist"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Sales Order</td><td>Customer</td><td>Product</td><td>Price List Rate</td><td>Sold Rate</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select a.territory,a.name,a.customer,b.item_code,b.ref_rate,b.export_rate from `tabSales Order Item` b,`tabSales Order` a where DATE(a.creation)=CURDATE() and a.name=b.parent and b.ref_rate <> b.export_rate and b.ref_rate != 0 and a.territory='"+cstr(rr[0])+"' order by a.name "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td><td>"+cstr(rs[5])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('[email protected]', subject='Regional Items Sold Below Price List Rate Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('[email protected]', subject="Items Sold Below Price List Rate Alert", msg = msg3)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_oppt_details():
print "old oppts"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Employee</td><td>Opportunity</td><td>LEAD/Customer</td><td>Created Before days</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabOpportunity` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select a.territory,a.owner,a.name,CASE a.enquiry_from WHEN 'Customer' THEN a.customer ELSE a.lead END,DATEDIFF(CURDATE(),DATE(a.creation)) from `tabOpportunity` a where DATEDIFF(CURDATE(),DATE(a.creation))>=25 and status<> 'Quotation' and a.territory='"+rr[0]+"'order by a.owner,a.territory "
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('[email protected]', subject='Regional Not Converted Opportunities Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('[email protected]', subject="Not Converted Opportunities Alert", msg = msg3)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_invoice_details():
print "invoice not created"
from webnotes.utils.email_lib import sendmail
	start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Region</td><td>Employee</td><td>Sales Order</td><td>Customer ID</td><td>Customer Name</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct territory from `tabSales Order` where territory is not null order by territory"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
msg1=''
bb="select territory,owner,name,customer,customer_name from `tabSales Order` where territory='"+rr[0]+"' and name not in (select distinct(sales_order) from `tabSales Invoice Item` where sales_order is not null) order by territory,owner"
#print bb
res1=webnotes.conn.sql(bb)
for rs in res1:
#print rs
#print msg
msg=msg+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
msg1=msg1+"<tr><td>"+cstr(rs[0])+"</td><td>"+cstr(rs[1])+"</td><td>"+cstr(rs[2])+"</td><td>"+cstr(rs[3])+"</td><td>"+cstr(rs[4])+"</td></tr>"
#print msg
msg2=start+""+cstr(msg1)+" "+end
print "------------------- region"
print msg2
cc="SELECT p.name,y.defkey,y.defValue from `tabProfile` p, `tabUserRole` r, `tabDefaultValue` y where r.role='Regional Manager' and y.defkey='territory' and y.defvalue='"+rr[0]+"' and r.parent=p.name and p.name=y.parent"
#print cc
res3=webnotes.conn.sql(cc)
for r in res3:
if res1:
print "res in send mail"
sendmail('[email protected]', subject='Regional Invoices Not Created Alert', msg = msg2)
msg3=start+""+cstr(msg)+" "+end
print msg1
if res1:
sendmail('[email protected]', subject="Invoices Not Created Alert", msg = msg3)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_amccmc_details():
print "amc cmc"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >AMC/CMC Details</td><td>Asset Name </td><td>AMC/CMC Expiring Date</td></tr>"""
end="""</table></body></html>"""
aa="""select b.amc_details,a.item_code,datediff(date(b.expiry_date),CURDATE()), b.start_date,b.expiry_date from `tabAMC Details` b,`tabItem` a where a.name=b.parent and expiry_date in(select max(expiry_date) from `tabAMC Details` where parent=b.parent) and datediff(date(b.expiry_date),CURDATE())<=15"""
res=webnotes.conn.sql(aa)
msg=''
print res
for rr in res:
print rr
print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[4])+"</td></tr>"
print msg
msg1=start+""+cstr(msg)+" "+end
print msg1
if res:
sendmail('[email protected]', subject="AMC/CMC Expiring Alert", msg = msg1)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_todays_material_details():
#print "todays_material_"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Purchase Order</td><td>Product </td><td>Quantity</td></tr>"""
end="""</table></body></html>"""
aa="""select a.name,b.item_code,b.schedule_date,b.qty from `tabPurchase Order`a,`tabPurchase Order Item`b where a.name not in(select d.prevdoc_docname from `tabPurchase Receipt`c,`tabPurchase Receipt Item`d where d.schedule_date=CURDATE() and d.parent=c.name) and b.schedule_date=CURDATE() and b.parent=a.name"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
#print rr
#print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[3])+"</td></tr>"
#print msg
msg1=start+""+cstr(msg)+" "+end
if res:
sendmail('[email protected]', subject="Todays Expected Material Not Received Alert", msg = msg1)
return "done"
@webnotes.whitelist(allow_guest=True)
def send_low_stock_details():
print "low stock"
from webnotes.utils.email_lib import sendmail
start="""<html><head><style>table,th,td{border:1px solid black;border-collapse:collapse;}</style></head><table style="width:100%";><tbody><tr style="background-color:Lime;color:white;"><td >Product</td><td>Warehouse </td><td>Actual Quantity in Warehouse</td><td>Minimum Quantity level</td></tr>"""
end="""</table></body></html>"""
aa="""select distinct a.item_code,a.warehouse,a.actual_qty,b.re_order_level from `tabBin`a,`tabItem`b where a.actual_qty<=b.re_order_level and b.re_order_level!=0"""
res=webnotes.conn.sql(aa)
msg=''
#print res
for rr in res:
#print rr
#print msg
msg=msg+"<tr><td>"+cstr(rr[0])+"</td><td>"+cstr(rr[1])+"</td><td>"+cstr(rr[2])+"</td><td>"+cstr(rr[3])+"</td></tr>"
#print msg
msg1=start+""+cstr(msg)+" "+end
if res:
sendmail('[email protected]', subject="Minimum Stock Level Reached Alert", msg = msg1)
return "done"
@webnotes.whitelist(allow_guest=True)
def GetVerify(verificationCode):
return '0^232322422'
@webnotes.whitelist(allow_guest=True)
def GetEmployee(sessionCode,empID):
aa="select employee_name from tabEmployee where name="+empID
res=webnotes.conn.sql(aa)
if res:
return '0^'+res[0][0]
else:
return "Employee not found for employee ID "+empID
@webnotes.whitelist(allow_guest=True)
def GetProducts(sessionCode,instType,customerID):
if sessionCode:
return '0^53424423423'
else:
return "1^invalid session code"
@webnotes.whitelist(allow_guest=True)
def GetInstDetails(sessionCode,instType,prodBarCode):
if sessionCode:
return '0^shree clinic^deccan pune^Dr.Metgud^pune^411004^Dr. Sanjay Joshi^Maharashtra^[email protected]^9822012345^www.sanjayjoshi.com^MH/REG/CL/21232^MH/REG/DR/212323^IN00004^ScanDoc^IOF-00003^2242423~3423424545~553534434~353r445345~3434434'
else:
return "1^invalid session code"
@webnotes.whitelist(allow_guest=True)
def SetRegister(sessionCode,instType,customerID,prodBarCode,empID,prodName,prodVersion,iofNumber,instReportNumber,contactPersonsOnSite,mateBarCode):
if sessionCode:
return '0^IN00004'
else:
return "1^invalid session code"
| agpl-3.0 | -3,199,338,655,356,066,000 | 42.427785 | 684 | 0.564662 | false |
Lydwen/Mr.Statamutation | Mr.Statapython/statapython/__main__.py | 1 | 4758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import webbrowser
from .statareport import Reporting
from .stataspoon import MutationsTester
from .statautils import Directory, Logger
from .stataxml import ConfigParser
""" Report filename """
REPORT_FILENAME = 'statam_report.html'
def main(args):
"""
Main function.
:param args: command-line arguments
"""
if args.project:
# Go to project directory
os.chdir(os.path.abspath(args.project))
# Check if Spoon need to be applied
if not args.disable_spoon:
# Clean the report directory
Logger.log('Pre-cleaning report directory "%s"' % args.report_directory, True)
pre_clean(args.report_directory)
# Load the configuration
Logger.log('Load mutations configuration "%s"' % args.mutations_config, True)
mutations_config = ConfigParser.parse(args.mutations_config)
# Create mutator tester, and execute original tests
mutator = MutationsTester(args.tests_directory, args.report_directory, not args.keep_temp)
mutator.process(args.original)
# Get all mutations
mutations = mutations_config['mutations']['mutation']
        if not isinstance(mutations, (list, tuple)):
            mutations = (mutations,)  # bind a single mutation to a tuple
# Execute every mutations
for mutation in mutations:
mutator.process(mutation['name'],
mutation['processors'].get('processor', ()) if mutation.get('processors', None) else (),
mutation.get('selector', ()))
# Check if report generation is enabled
if not args.disable_report:
# Compute reporting
report_file = os.path.join(args.report_directory, REPORT_FILENAME)
report_abspath = os.path.abspath(report_file)
Logger.log('=============== Generating report ===============', True)
Reporting(args.report_directory, args.original).report(report_file)
Logger.log('Report accessible at: %s' % report_abspath)
# Open in browser if asked to
if args.open_browser:
Logger.log('Opening report file in browser...')
webbrowser.open(report_abspath)
def pre_clean(directory):
"""
Pre-clean the project.
:param directory: report directory
:return:
"""
# Clean directory
try:
Directory.delete(directory)
except:
Logger.log('[Warning] Error on cleaning report directory ("%s")' % directory)
# Create a new one
try:
Directory.create(directory)
except:
Logger.log('[Warning] Error on creating report directory ("%s")' % directory)
def get_parser():
"""
Initialize command-line parser with default and optional arguments.
:return: parser
"""
# Enable command-line parsing
parser = argparse.ArgumentParser()
# Optional arguments
parser.add_argument('-p', '--project',
help='project main directory')
parser.add_argument('-m', '--mutations-config',
help='mutations configuration file',
default='./statamutations.xml')
parser.add_argument('-r', '--report-directory',
help='report output directory (generated report)',
default='./target/statam-report')
parser.add_argument('-t', '--tests-directory',
help='tests directory (output when tests are executed)',
default='./target/surefire-reports')
parser.add_argument('-g', '--original',
help='original (not mutated) tests directory',
default='_original_')
parser.add_argument('-k', '--keep-temp',
help='enable/disable temporary file cleaning',
action='store_true')
parser.add_argument('-o', '--open-browser',
help='open the report file in the default browser after generation',
action='store_true')
parser.add_argument('--disable-spoon',
help='disable Spoon (only the report will be computed)',
action='store_true')
parser.add_argument('--disable-report',
help='disable report generation (only Spoon will be applied)',
action='store_true')
return parser
# Main execution
if __name__ == "__main__":
# Enable logging
Logger.ENABLED = True
Logger.log('=============== <3 - Welcome in Mr.Statamutation project - <3 ===============', True)
# Start the main
sys.exit(main(
# Parse command-line args
get_parser().parse_args()
))
| mit | -4,865,774,189,952,201,000 | 32.744681 | 116 | 0.592266 | false |
hosiet/flasktex | deprecated/texworker.py | 1 | 10590 | #!/usr/bin/env python3
#
# texworker.py -- background worker for flasktex
#
# This file is part of flasktex.
#
# Copyright (c) 2015, Boyuan Yang <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Background worker for latex
"""
import subprocess
import os, sys
import shutil
import signal
import sqlite3
import time
import syslog
import multiprocessing
from flasktex.config import ft_getconfig
# CONFIG
DEFAULT_RENDERER = ft_getconfig("DEFAULTRENDERER")
DEFAULT_TIMEOUT = ft_getconfig("WORKERTIMEOUT")
DATABASE_NAME = ft_getconfig("DATABASENAME")
DATABASE_PATH = ft_getconfig("DATABASEPATH")
assert DEFAULT_RENDERER
assert DEFAULT_TIMEOUT
assert DATABASE_NAME
assert DATABASE_PATH
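# Typical usage (sketch): the Flask request handler builds a worker from the
# submitted LaTeX source and calls run(), which returns immediately in the
# calling process while a daemonized child performs the rendering, e.g.:
#
#     TexWorker(tex_source, timeout=60).run()
#
# (tex_source is a placeholder for the UTF-8 LaTeX string; renderer, db and
# path fall back to the configured defaults above.)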
class TexWorker():
"""
    A background worker that renders the LaTeX job in the background.
    It daemonizes itself via the double-fork technique and gives up
    (recording the job as failed) after the given timeout, in seconds.
"""
def __init__(self, rawstring, renderer=DEFAULT_RENDERER,
timeout=DEFAULT_TIMEOUT, db=DATABASE_NAME,
path=DATABASE_PATH, args=None):
self.rawstring = rawstring # Have to be UTF-8 String.
assert hasattr(self.rawstring, 'encode')
self.renderer = renderer
self.timeout = int(timeout) # XXX: Have to be integer
self.conn_filepath = path + db
self.popen = None
self.workid = None
self.result = None
conn = sqlite3.connect(path+db)
c = conn.cursor()
# Write to be in waiting line
start_time = str(time.time())
print('start_time is now {}.'.format(start_time))
c.execute('INSERT INTO `work` VALUES (?,?,?,?,?,?,?,?);',
(None, None, self.rawstring, None, start_time, None,
'R', None))
conn.commit()
found = False
for i in c.execute('SELECT `id` FROM `work` WHERE starttime=?', (start_time,)):
found = True
self.workid = i[0];
print('the workid is {}.'.format(self.workid))
break
if not found:
raise Exception('WORKER_NOT_FOUND_IN_DATABASE')
try:
conn.close()
except Exception:
pass
return
@staticmethod
def daemonize():
"""
Daemonize with special treatments.
        If it returns False, we are still in the original process.
        If it returns True, we are in the daemonized process.
"""
syslog.syslog('Beginning to daemonize...')
try:
pid = os.fork()
if pid > 0:
return False # return to flask worker
except OSError as e:
syslog.syslog('OSError1!')
raise
os.chdir("/")
os.setsid()
os.umask(0)
sys.stdout.flush()
sys.stderr.flush()
# Closing all opened file descriptors
MAXFD = os.sysconf("SC_OPEN_MAX")
for i in range(0, MAXFD):
try:
os.close(i)
except:
pass
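        # Second fork: the parent (the session leader created by setsid())
        # exits, so the surviving grandchild can never reacquire a
        # controlling terminal - the standard double-fork daemon recipe.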
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError as e:
syslog.syslog('OSError2!')
raise
si = open("/dev/null", 'r')
so = open("/dev/null", 'w')
se = open("/dev/null", 'w')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
syslog.syslog('daemonize finished. pid is {}.'.format(os.getpid()))
return True
def __startup(self):
# Switch to working dir and make a subprocess.Popen object for working
# After that, wait for timeout.
syslog.syslog('at the beginning of __startup().')
os.chdir("/tmp/")
tempdir = None
try:
tempdir = subprocess.check_output(['mktemp', '-d', 'flasktex.XXXXXXXXXX'], timeout=3).decode('UTF-8').strip()
except subprocess.TimeoutExpired:
syslog.syslog('Exception: subprocess.TimeoutExpired.')
raise
os.chdir("./{}/".format(tempdir))
# Write input file as `input.tex'
f = open('input.tex', 'wb')
f.write(self.rawstring.encode('UTF-8'))
f.close()
syslog.syslog('after writing input.tex')
# Form the Popen object, start the process, log in SQLite database
try:
syslog.syslog('Now renderer: {}'.format(self.renderer))
# XXX: use xelatex by force
self.renderer = "xelatex"
self.popen = subprocess.Popen(['latexmk',
'-pdflatex="{} {} %O %S"'.format(self.renderer, '-halt-on-error'),
'-xelatex',
'input.tex'], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
syslog.syslog('we have started subporcess.')
try:
conn = sqlite3.connect(self.conn_filepath)
conn.execute('UPDATE `work` SET `status`=? WHERE `id`={};'.format(self.workid), ('R',))
conn.commit()
conn.close()
except Exception as e:
syslog.syslog('Houston, we had a problem.')
raise
syslog.syslog('after writing running state.')
(stdout_data, stderr_data) = self.popen.communicate(input=None, timeout=self.timeout)
# XXX: here reads all the data
syslog.syslog("The stdout_data is: {}.".format(str(stdout_data)))
syslog.syslog("The stderr_data is: {}.".format(str(stderr_data)))
except Exception as e:
raise
def __cleanup(self, success=False):
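        # Persist the outcome: on success store the generated PDF, otherwise
        # mark the job as timed out ('X') or failed ('E'); in both cases save
        # the LaTeX log and stop time, then remove the temporary work dir.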
conn = sqlite3.connect(self.conn_filepath)
c = conn.cursor()
c.execute('BEGIN TRANSACTION;')
if success:
# write pdf and status into database
try:
f = open('input.pdf', 'rb')
except OSError:
raise
c.execute('UPDATE `work` SET `output`=?, `status`=? WHERE `id`={};'.format(self.workid),
(f.read(), 'S'))
f.close()
else:
if self.popen.returncode == None:
# terminate the Process first
self.popen.terminate()
self.result = 'X' # TIME_EXCEEDED
else:
self.result = 'E' # ERROR_HAPPENED
c.execute('UPDATE `work` SET `status`=? WHERE `id`={};'.format(self.workid),
(self.result,))
# write log and stoptime into database
try:
f = open('input.log', 'r')
except OSError:
raise
c.execute('UPDATE `work` SET `log`=?, `stoptime`=? WHERE `id`={};'.format(self.workid),
(f.read(), str(time.time())))
# close database connection
conn.commit()
conn.close()
# remove the temp dir
syslog.syslog('removing working dir...')
cwd = os.getcwd()
assert cwd.split('.')[0] == '/tmp/flasktex'
os.chdir('..')
shutil.rmtree(cwd)
syslog.syslog('end of __cleanup(). status is {}.'.format(str(success)))
return
def __terminate_handler(self, signum, stackframe):
"""
The final method to be called, then do sys.exit().
"""
syslog.syslog('entered handler with signum of {}.'.format(signum))
signal.alarm(0)
syslog.syslog('after signal.')
        if self.popen is None or self.popen.returncode != 0:
syslog.syslog('entering __cleanup, not successful.')
self.__cleanup(success=False)
syslog.syslog('worker exiting with num -1.')
sys.exit(-1)
else:
            syslog.syslog('entering __cleanup, successful.')
self.__cleanup(success=True)
syslog.syslog('worker exiting with num 0.')
sys.exit(0)
def _do_work(self):
"""
The uppermost method to finish TeXWork.
* Use SIGALRM to set timeout.
If SIGALRM is received, consider that the worker
is taking too much time, then gracefully exit and
record the task as failed.
"""
syslog.syslog('entering _do_work().')
try:
signal.signal(signal.SIGALRM, self.__terminate_handler)
signal.signal(signal.SIGTERM, self.__terminate_handler)
signal.alarm(self.timeout)
except Exception as e:
syslog.syslog(str(e.args))
self.__startup()
syslog.syslog('successfully finished the work within time.')
signal.alarm(0)
self.__cleanup(success=True)
def run(self):
"""
Start the TeXWorker in a daemonized process.
        Uses the double-fork technique.
        Calling this method returns immediately in the original process.
"""
# Daemonize and continue the work.
if not self.daemonize():
return
# run the work.
syslog.syslog('This is worker daemon and we will now begin the work.')
self._do_work()
# shall never reach.
return
# vim: set ts=8 sw=4 tw=0 et :
| bsd-3-clause | 2,271,618,513,058,493,200 | 35.020408 | 121 | 0.586119 | false |
luisza/async_notifications | async_notifications/mail_utils.py | 1 | 1362 | # encoding: utf-8
'''
Free as freedom will be 26/9/2016
@author: luisza
'''
from __future__ import unicode_literals
from .settings import (NOTIFICATION_USER_MODEL, USER_LOOKUP_FIELDS,
NOTIFICATION_GROUP_MODEL, GROUP_LOOKUP_FIELDS)
from .utils import extract_emails, get_model
#from django.contrib.auth.models import User, Group
User = get_model(NOTIFICATION_USER_MODEL)
Group = get_model(NOTIFICATION_GROUP_MODEL)
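# Pseudo-addresses of the form "Some__Group__Name@group" refer to a Group:
# double underscores stand for spaces in the group name. If the group model
# exposes an e-mail field, that address is used; otherwise the e-mails of
# the users belonging to the group are returned.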
def get_mails_from_group(group_name):
name = group_name.replace("@group", "").replace("__", " ")
group = Group.objects.get(**{GROUP_LOOKUP_FIELDS['group_lookup']: name})
email = None
# check if group has email (util with mail list approach)
name_field = GROUP_LOOKUP_FIELDS['email']
if name_field:
if hasattr(group, name_field):
email = getattr(group, name_field)
if email:
return [email]
if 'group_lookup' in USER_LOOKUP_FIELDS:
users = User.objects.filter(**{USER_LOOKUP_FIELDS['group_lookup']: name})
return [u.email for u in users]
return []
def get_all_emails(text):
if text is None:
return []
mails = extract_emails(text)
gmails = []
    # Iterate over a copy of the list: removing items from ``mails`` while
    # iterating over it directly would skip the element that follows each
    # removal.
    for mail in list(mails):
        if "@group" in mail:
            mails.remove(mail)
            gmails += get_mails_from_group(mail)
mails += gmails
return set(mails)
| gpl-2.0 | 476,110,698,621,409,200 | 27.375 | 81 | 0.637298 | false |
colossalbit/cssypy | cssypy/utils/useroptions.py | 1 | 11311 | from __future__ import absolute_import
from __future__ import print_function
import os.path
import argparse
import ConfigParser as configparser
import collections
import __builtin__
import six
from . import reporters
from .. import defs
#==============================================================================#
_true_strings = set('true t yes y 1 on enable'.split())
_false_strings = set('false f no n 0 off disable'.split())
def string_bool(val):
val = val.lower()
if val in _true_strings:
return True
elif val in _false_strings:
return False
else:
raise ValueError('String is not a known boolean value.')
#==============================================================================#
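# OptionDef describes a single option that can be supplied either on the
# command line (argparse) or in the config file (ConfigParser); the same
# definition drives both, including type conversion (bool values accept
# true/false, yes/no, on/off, 1/0) and choice validation.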
class OptionDef(object):
def __init__(self, name, argnames=None, default=None, type=None,
choices=None, help=None, argparser_kwargs=None,
cmdline_option=True, file_option=True, list=False,
is_argument=False, metavar=None, dest=None, action=None,
cmdline_helper=False, hide=False):
assert '-' not in name
self.name = name # name used to store the option
assert argnames is None or isinstance(argnames, tuple)
self._argnames = argnames
if list:
self.default = default or __builtin__.list
else:
self.default = default
self.type = type or six.text_type
self.choices = choices
self.help = help # used only by argparser
self.list = list
self._argparser_kwargs = argparser_kwargs or {}
self.metavar = metavar
self.dest = dest
self.action = action
if cmdline_helper:
self.cmdline_option = True
self.file_option = False
self.hide = True
else:
self.cmdline_option = True if is_argument else cmdline_option
self.file_option = False if is_argument else file_option
self.hide = hide
self.is_argument = is_argument
def argparser_names(self):
if self._argnames:
return self._argnames
elif self.is_argument:
return (self.name.replace('_', '-'),)
else:
return ('--'+self.name.replace('_', '-'),)
def configfile_name(self):
return self.name.lower()
def _conv_elem(self, x):
x = self.configfile_type()(x)
if self.choices and x not in self.choices:
raise RuntimeError()
return x
def configfile_conv(self, x):
if self.list:
return [self._conv_elem(x.strip()) for x in x.split(',')]
else:
return self._conv_elem(x.strip())
def configfile_type(self):
if self.type == bool:
return string_bool
return self.type
def argparser_type(self):
if self.type == bool:
return string_bool
return self.type
def get_default(self):
if isinstance(self.default, collections.Callable):
return self.default()
return self.default
def argparser_kwargs(self):
kwargs = self._argparser_kwargs.copy()
# Do not use defaults on command line so they can be overridden by
# config file.
kwargs['default'] = argparse.SUPPRESS
if self.action not in ('store_true', 'store_false'):
kwargs['type'] = self.argparser_type()
if not self.is_argument:
if self.dest:
kwargs['dest'] = self.dest
else:
kwargs['dest'] = self.name
if self.help:
kwargs['help'] = self.help
if self.choices:
kwargs['choices'] = self.choices
if self.metavar:
kwargs['metavar'] = self.metavar
if self.action:
kwargs['action'] = self.action
if self.list:
kwargs['nargs'] = '*'
return kwargs
#==============================================================================#
class OptGroupDef(object):
def __init__(self, name, description=None):
self.name = name
self.description = description
self.optdefs = []
self.optnames = set()
def add_optdef(self, optdef):
assert optdef.name.lower() not in self.optnames
self.optdefs.append(optdef)
self.optnames.add(optdef.name.lower())
def iteroptions(self):
return (opt for opt in self.optdefs if not opt.hide)
def iter_cmdline_options(self):
return (opt for opt in self.optdefs if opt.cmdline_option)
def iter_file_options(self):
return (opt for opt in self.optdefs if opt.file_option)
#==============================================================================#
class OptionSpec(object):
def __init__(self, default_name='General', default_description=None,
usage=None, prog=None, description=None, epilog=None):
self.usage = usage
self.prog = prog
self.description = description
self.epilog = epilog
self.optdefs = []
self.groupdefs = []
self.optnames = set() # test for duplicates
self.groupnames = set() # test for duplicates
self._default_group = OptGroupDef(default_name, default_description)
self.add_groupdef(self._default_group)
def argparser_kwargs(self):
kwargs = {}
if self.usage:
kwargs['usage'] = self.usage
if self.prog:
kwargs['prog'] = self.prog
if self.description:
kwargs['description'] = self.description
if self.epilog:
kwargs['epilog'] = self.epilog
return kwargs
def add_optdef(self, optdef):
assert optdef.name.lower() not in self.optnames
self._default_group.add_optdef(optdef)
self.optnames.add(optdef.name.lower())
def add_groupdef(self, groupdef):
assert groupdef.name.lower() not in self.groupnames
for opt in groupdef.iteroptions():
assert opt.name.lower() not in self.optnames
self.optnames.add(opt.name.lower())
self.groupdefs.append(groupdef)
self.groupnames.add(groupdef.name.lower())
def get_groupdef(self, name):
lname = name.lower()
for group in self.groupdefs:
if group.name.lower() == lname:
return group
raise KeyError("OptionSpec has no OptGroupDef '{}'.".format(name))
def itergroups(self, include_default_group=True):
if include_default_group:
return iter(self.groupdefs)
else:
return iter(self.groupdefs[1:])
def iteroptions(self):
return self.groupdefs[0].iteroptions()
def _iteroptions(self):
return iter(self.groupdefs[0].optdefs)
def iter_cmdline_options(self):
return (opt for opt in self._iteroptions() if opt.cmdline_option)
def iter_file_options(self):
return (opt for opt in self._iteroptions() if opt.file_option)
#==============================================================================#
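# OptionsReader combines the three sources of option values. A minimal
# usage sketch (spec being a populated OptionSpec):
#
#     reader = OptionsReader(spec)
#     opts = reader.get_options(cmdline=sys.argv[1:])
#
# Precedence is: command-line value, then config-file value, then the
# default declared on the OptionDef (see merge_options()).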
class OptionsReader(object):
def __init__(self, optspec, reporter=None):
self.reporter = reporter or reporters.NullReporter()
self.optspec = optspec
self.config_filename = None
def build_argparser(self, argparser):
# general options
for opt in self.optspec.iter_cmdline_options():
argparser.add_argument(*opt.argparser_names(),
**opt.argparser_kwargs())
# group options
for groupdef in self.optspec.itergroups(include_default_group=False):
arggroup = argparser.add_argument_group(groupdef.name,
groupdef.description)
for opt in groupdef.iter_cmdline_options():
arggroup.add_argument(*opt.argparser_names(),
**opt.argparser_kwargs())
return argparser
def get_argparser(self):
parser = argparse.ArgumentParser(**self.optspec.argparser_kwargs())
parser = self.build_argparser(parser)
return parser
def find_config_file(self, filename):
# if filename has no path, try in common locations
# if filename has path, try in common locations?
# return None if file not found, otherwise return filename
homepath = os.path.expanduser('~')
for dir in ('', homepath):
filepath = os.path.join(dir, filename)
if os.path.isfile(filepath):
return filepath
return None
def parse_config_file(self, filename=None):
# 1. Determine the file name.
filename = filename or defs.CONFIG_FILENAME
filepath = self.find_config_file(filename)
if not filepath:
m = "Config file '{0}' not found.".format(filename)
self.reporter.warning(m)
return {}
# 2. Open and parse the file
parser = configparser.ConfigParser()
try:
with open(filepath, 'r') as f:
parser.readfp(f, filepath)
except (IOError, OSError):
m = "Error trying to open config file '{0}'.".format(filename)
self.reporter.warning(m)
return {}
# 3. Read the contents into a dict.
confdict = {}
for groupdef in self.optspec.itergroups():
section = groupdef.name
for opt in groupdef.iter_file_options():
try:
val = parser.get(section, opt.configfile_name(), raw=True)
# TODO: handle exception when configfile_conv() fails
val = opt.configfile_conv(val)
confdict[opt.name] = val
except (configparser.NoSectionError, configparser.NoOptionError):
pass
return confdict
def get_cmdline_options(self, cmdline=None):
argparser = self.get_argparser()
args = argparser.parse_args(args=cmdline)
argsdict = vars(args)
self.config_filename = argsdict.get(defs.CONFIGFILE_OPTNAME, None) or None
return argsdict
def get_file_options(self, filename=None):
confdict = self.parse_config_file(filename or self.config_filename)
return confdict
def merge_options(self, argsdict, confdict):
for opt in self.optspec.iteroptions():
if opt.name not in argsdict:
if opt.name in confdict:
argsdict[opt.name] = confdict[opt.name]
else:
argsdict[opt.name] = opt.get_default()
return argsdict
def get_options(self, cmdline=None, config_filename=None):
argsdict = self.get_cmdline_options(cmdline)
confdict = self.get_file_options(config_filename)
optdict = self.merge_options(argsdict, confdict)
return optdict
#==============================================================================#
| bsd-3-clause | 583,149,570,608,022,800 | 35.022293 | 82 | 0.548493 | false |
wettenhj/mytardis | tardis/tardis_portal/tests/test_dumpschemas.py | 1 | 1363 | import json
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from ..models.parameters import Schema
class DumpSchemasTestCase(TestCase):
def setUp(self):
self.schema1 = Schema(
namespace='http://www.example.com/schema1.xml',
type=Schema.DATAFILE)
self.schema1.save()
self.schema2 = Schema(
namespace='http://www.example.com/schema2.xml',
type=Schema.DATAFILE)
self.schema2.save()
def testDumpSchemas(self):
'''
Just test that we can run
./manage.py dumpschemas
without any runtime exceptions
'''
schemas = json.loads(
call_command('dumpschemas',
namespaces=['http://www.example.com/schema1.xml',
'http://www.example.com/schema2.xml']))
self.assertEqual(len(schemas), 2)
schemas = json.loads(
call_command('dumpschemas',
namespaces=['http://www.example.com/schema1.xml']))
self.assertEqual(len(schemas), 1)
with self.assertRaises(CommandError):
call_command('dumpschemas', namespaces=['invalid'])
def tearDown(self):
self.schema1.delete()
self.schema2.delete()
| gpl-3.0 | 5,470,142,584,394,264,000 | 31.452381 | 76 | 0.594277 | false |
greggyNapalm/lunaport_server | lunaport_server/plugg_views/HookRegistration.py | 1 | 5396 | # -*- encoding: utf-8 -*-
"""
lunaport.plugg_views.hook_registration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Class-based view for hook_registration resource.
hook_registration - m2m connection between a case and a hook; a rule to start a test.
"""
import json
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from flask import jsonify, request, Response, url_for, session
from Base import BaseView
from .. dao.exceptions import StorageError
from .. dao.hook_registration import RDBMS
from .. domain.hook_registration import HookRegistrationBuilder, HookRegistrationAdaptor
class HookRegistration(BaseView):
str_params = [
'case_id',
'hook_id',
'descr',
'cfg',
]
dao = RDBMS
def get(self, hook_registration_id=None):
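        # Without an id: list registrations using the parsed query parameters
        # and emit a pagination Link header; with an id: return that single
        # registration, or 404 if it does not exist.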
if hook_registration_id is None: # walk through all registrations
q = self.cmpl_query()
try:
h_regs, per_page, next_page, prev_page = self.dao.get_many(**q)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Business logic layer error',
'error_text': str(e),
}
return jsonify(msg), 500
if not h_regs:
return Response(status=404)
body = json.dumps(
[HookRegistrationAdaptor.to_resp(r, jsonify=False) for r in h_regs])
hdrs = {
'Content-Type': 'application/json; charset=utf-8',
'Link': self.cmpl_link_hdr(request, per_page, next_page,
prev_page),
}
return Response(response=body, status=200,
headers=hdrs)
else: # try to get single *hook_registration* entrie by id
try:
h_regs = self.dao.get_single(hook_registration_id=hook_registration_id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
if not h_regs:
return Response(status=404)
hdrs = {'Content-Type': 'application/json; charset=utf-8'}
return Response(response=HookRegistrationAdaptor.to_resp(h_regs), status=200,
headers=hdrs)
def post(self):
try:
hook_registration = HookRegistrationBuilder.from_Flask_req(request, session)
except ValueError as e:
msg = {
'error_type': 'Malformed body attributes',
'error_text': str(e),
}
return jsonify(msg), 422
try:
hook_registration.id = self.dao.insert(hook_registration)
hook_registration = self.dao.get_single(hook_registration_id=hook_registration.id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Malformed body attributes',
'error_text': str(e),
}
return jsonify(msg), 409
res_location = '{}{}'.format(url_for('hook_registration'), hook_registration.id)
return Response(response=HookRegistrationAdaptor.to_resp(hook_registration), status=201,
headers={
'Location': res_location,
'Content-Type': 'application/json; charset=utf-8'
})
def patch(self, hook_registration_id):
diff = request.json
if not diff:
msg = {
'error_type': 'Malformed body attributes',
'error_text': 'Can\'t deserialize json document',
}
return jsonify(msg), 422
try:
hook_registration = self.dao.update_by_id(hook_registration_id, diff)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
res_location = '{}{}'.format(url_for('hook_registration'), hook_registration.id)
return Response(response=HookRegistrationAdaptor.to_resp(hook_registration), status=200,
headers={
'Location': res_location,
'Content-Type': 'application/json; charset=utf-8'
})
def delete(self, hook_registration_id):
try:
self.dao.delete(hook_registration_id)
except StorageError as e:
msg = {
'error_type': 'Storage call fails',
'error_text': str(e),
}
return jsonify(msg), 500
except ValueError as e:
msg = {
'error_type': 'Malformed user provided data',
'error_text': str(e),
}
return jsonify(msg), 422
return Response(status=200)
| apache-2.0 | -1,302,585,851,489,007,900 | 34.038961 | 96 | 0.509451 | false |
redhat-openstack/glance | glance/api/v2/metadef_namespaces.py | 1 | 28184 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import six
import six.moves.urllib.parse as urlparse
import webob.exc
from wsme.rest.json import fromjson
from wsme.rest.json import tojson
from glance.api import policy
from glance.api.v2.model.metadef_namespace import Namespace
from glance.api.v2.model.metadef_namespace import Namespaces
from glance.api.v2.model.metadef_object import MetadefObject
from glance.api.v2.model.metadef_property_type import PropertyType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance.common import wsme_utils
import glance.db
import glance.gateway
from glance import i18n
import glance.notifier
from glance.openstack.common import jsonutils as json
import glance.openstack.common.log as logging
import glance.schema
LOG = logging.getLogger(__name__)
_LE = i18n._LE
_LW = i18n._LW
_LI = i18n._LI
CONF = cfg.CONF
class NamespaceController(object):
def __init__(self, db_api=None, policy_enforcer=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.gateway = glance.gateway.Gateway(db_api=self.db_api,
policy_enforcer=self.policy)
self.ns_schema_link = '/v2/schemas/metadefs/namespace'
self.obj_schema_link = '/v2/schemas/metadefs/object'
def index(self, req, marker=None, limit=None, sort_key='created_at',
sort_dir='desc', filters=None):
try:
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
# Get namespace id
if marker:
namespace_obj = ns_repo.get(marker)
marker = namespace_obj.namespace_id
database_ns_list = ns_repo.list(
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir, filters=filters)
for db_namespace in database_ns_list:
# Get resource type associations
filters = dict()
filters['namespace'] = db_namespace.namespace
rs_repo = (
self.gateway.get_metadef_resource_type_repo(req.context))
repo_rs_type_list = rs_repo.list(filters=filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in repo_rs_type_list]
if resource_type_list:
db_namespace.resource_type_associations = (
resource_type_list)
namespace_list = [Namespace.to_wsme_model(
db_namespace,
get_namespace_href(db_namespace),
self.ns_schema_link) for db_namespace in database_ns_list]
namespaces = Namespaces()
namespaces.namespaces = namespace_list
if len(namespace_list) != 0 and len(namespace_list) == limit:
namespaces.next = namespace_list[-1].namespace
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return namespaces
@utils.mutating
def create(self, req, namespace):
try:
namespace_created = False
# Create Namespace
ns_factory = self.gateway.get_metadef_namespace_factory(
req.context)
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
new_namespace = ns_factory.new_namespace(**namespace.to_dict())
ns_repo.add(new_namespace)
namespace_created = True
# Create Resource Types
rs_factory = (
self.gateway.get_metadef_resource_type_factory(req.context))
rs_repo = self.gateway.get_metadef_resource_type_repo(req.context)
if namespace.resource_type_associations:
for resource_type in namespace.resource_type_associations:
new_resource = rs_factory.new_resource_type(
namespace=namespace.namespace,
**resource_type.to_dict())
rs_repo.add(new_resource)
# Create Objects
object_factory = self.gateway.get_metadef_object_factory(
req.context)
object_repo = self.gateway.get_metadef_object_repo(req.context)
if namespace.objects:
for metadata_object in namespace.objects:
new_meta_object = object_factory.new_object(
namespace=namespace.namespace,
**metadata_object.to_dict())
object_repo.add(new_meta_object)
# Create Namespace Properties
prop_factory = (
self.gateway.get_metadef_property_factory(req.context))
prop_repo = self.gateway.get_metadef_property_repo(req.context)
if namespace.properties:
for (name, value) in namespace.properties.items():
new_property_type = (
prop_factory.new_namespace_property(
namespace=namespace.namespace,
**self._to_property_dict(name, value)
))
prop_repo.add(new_property_type)
except exception.Forbidden as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
self._cleanup_namespace(ns_repo, namespace, namespace_created)
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
# Return the user namespace as we don't expose the id to user
new_namespace.properties = namespace.properties
new_namespace.objects = namespace.objects
new_namespace.resource_type_associations = (
namespace.resource_type_associations)
return Namespace.to_wsme_model(new_namespace,
get_namespace_href(new_namespace),
self.ns_schema_link)
def _to_property_dict(self, name, value):
# Convert the model PropertyTypes dict to a JSON string
db_property_type_dict = dict()
db_property_type_dict['schema'] = tojson(PropertyType, value)
db_property_type_dict['name'] = name
return db_property_type_dict
def _cleanup_namespace(self, namespace_repo, namespace, namespace_created):
if namespace_created:
try:
namespace_obj = namespace_repo.get(namespace.namespace)
namespace_obj.delete()
namespace_repo.remove(namespace_obj)
msg = ("Cleaned up namespace %(namespace)s "
% {'namespace': namespace.namespace})
LOG.debug(msg)
            except Exception:
msg = (_LE("Failed to delete namespace %(namespace)s ") %
{'namespace': namespace.namespace})
LOG.error(msg)
def show(self, req, namespace, filters=None):
try:
# Get namespace
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
namespace_obj = ns_repo.get(namespace)
namespace_detail = Namespace.to_wsme_model(
namespace_obj,
get_namespace_href(namespace_obj),
self.ns_schema_link)
ns_filters = dict()
ns_filters['namespace'] = namespace
# Get objects
object_repo = self.gateway.get_metadef_object_repo(req.context)
db_metaobject_list = object_repo.list(filters=ns_filters)
object_list = [MetadefObject.to_wsme_model(
db_metaobject,
get_object_href(namespace, db_metaobject),
self.obj_schema_link) for db_metaobject in db_metaobject_list]
if object_list:
namespace_detail.objects = object_list
# Get resource type associations
rs_repo = self.gateway.get_metadef_resource_type_repo(req.context)
db_resource_type_list = rs_repo.list(filters=ns_filters)
resource_type_list = [ResourceTypeAssociation.to_wsme_model(
resource_type) for resource_type in db_resource_type_list]
if resource_type_list:
namespace_detail.resource_type_associations = (
resource_type_list)
# Get properties
prop_repo = self.gateway.get_metadef_property_repo(req.context)
db_properties = prop_repo.list(filters=ns_filters)
property_list = Namespace.to_model_properties(db_properties)
if property_list:
namespace_detail.properties = property_list
if filters and filters['resource_type']:
namespace_detail = self._prefix_property_name(
namespace_detail, filters['resource_type'])
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return namespace_detail
def update(self, req, user_ns, namespace):
namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
ns_obj = namespace_repo.get(namespace)
ns_obj.namespace = wsme_utils._get_value(user_ns.namespace)
ns_obj.display_name = wsme_utils._get_value(user_ns.display_name)
ns_obj.description = wsme_utils._get_value(user_ns.description)
# Following optional fields will default to same values as in
# create namespace if not specified
ns_obj.visibility = (
wsme_utils._get_value(user_ns.visibility) or 'private')
ns_obj.protected = (
wsme_utils._get_value(user_ns.protected) or False)
ns_obj.owner = (
wsme_utils._get_value(user_ns.owner) or req.context.owner)
updated_namespace = namespace_repo.save(ns_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
return Namespace.to_wsme_model(updated_namespace,
get_namespace_href(updated_namespace),
self.ns_schema_link)
def delete(self, req, namespace):
namespace_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = namespace_repo.get(namespace)
namespace_obj.delete()
namespace_repo.remove(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def delete_objects(self, req, namespace):
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = ns_repo.get(namespace)
namespace_obj.delete()
ns_repo.remove_objects(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def delete_properties(self, req, namespace):
ns_repo = self.gateway.get_metadef_namespace_repo(req.context)
try:
namespace_obj = ns_repo.get(namespace)
namespace_obj.delete()
ns_repo.remove_properties(namespace_obj)
except exception.Forbidden as e:
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(utils.exception_to_str(e))
raise webob.exc.HTTPInternalServerError()
def _prefix_property_name(self, namespace_detail, user_resource_type):
prefix = None
if user_resource_type and namespace_detail.resource_type_associations:
for resource_type in namespace_detail.resource_type_associations:
if resource_type.name == user_resource_type:
prefix = resource_type.prefix
break
if prefix:
if namespace_detail.properties:
new_property_dict = dict()
for (key, value) in namespace_detail.properties.items():
new_property_dict[prefix + key] = value
namespace_detail.properties = new_property_dict
if namespace_detail.objects:
for object in namespace_detail.objects:
new_object_property_dict = dict()
for (key, value) in object.properties.items():
new_object_property_dict[prefix + key] = value
object.properties = new_object_property_dict
if object.required and len(object.required) > 0:
required = [prefix + name for name in object.required]
object.required = required
return namespace_detail
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['self', 'schema', 'created_at', 'updated_at']
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(explanation=msg)
def index(self, request):
params = request.params.copy()
limit = params.pop('limit', None)
marker = params.pop('marker', None)
sort_dir = params.pop('sort_dir', 'desc')
if limit is None:
limit = CONF.limit_param_default
limit = min(CONF.api_limit_max, int(limit))
query_params = {
'sort_key': params.pop('sort_key', 'created_at'),
'sort_dir': self._validate_sort_dir(sort_dir),
'filters': self._get_filters(params)
}
if marker is not None:
query_params['marker'] = marker
if limit is not None:
query_params['limit'] = self._validate_limit(limit)
return query_params
def _validate_sort_dir(self, sort_dir):
if sort_dir not in ['asc', 'desc']:
msg = _('Invalid sort direction: %s') % sort_dir
raise webob.exc.HTTPBadRequest(explanation=msg)
return sort_dir
def _get_filters(self, filters):
visibility = filters.get('visibility')
if visibility:
if visibility not in ['public', 'private']:
msg = _('Invalid visibility value: %s') % visibility
raise webob.exc.HTTPBadRequest(explanation=msg)
return filters
def _validate_limit(self, limit):
try:
limit = int(limit)
except ValueError:
msg = _("limit param must be an integer")
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _("limit param must be positive")
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def show(self, request):
params = request.params.copy()
query_params = {
'filters': self._get_filters(params)
}
return query_params
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
namespace = fromjson(Namespace, body)
return dict(namespace=namespace)
def update(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
namespace = fromjson(Namespace, body)
return dict(user_ns=namespace)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def create(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response, 201)
response.location = get_namespace_href(namespace)
def show(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response)
def index(self, response, result):
params = dict(response.request.params)
params.pop('marker', None)
query = urlparse.urlencode(params)
result.first = "/v2/metadefs/namespaces"
result.schema = "/v2/schemas/metadefs/namespaces"
if query:
result.first = '%s?%s' % (result.first, query)
if result.next:
params['marker'] = result.next
next_query = urlparse.urlencode(params)
result.next = '/v2/metadefs/namespaces?%s' % next_query
ns_json = tojson(Namespaces, result)
response = self.__render(ns_json, response)
def update(self, response, namespace):
ns_json = tojson(Namespace, namespace)
response = self.__render(ns_json, response, 200)
def delete(self, response, result):
response.status_int = 204
def delete_objects(self, response, result):
response.status_int = 204
def delete_properties(self, response, result):
response.status_int = 204
def __render(self, json_data, response, response_status=None):
body = json.dumps(json_data, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
if response_status:
response.status_int = response_status
return response
def _get_base_definitions():
return get_schema_definitions()
def get_schema_definitions():
return {
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [
{"$ref": "#/definitions/positiveInteger"},
{"default": 0}
]
},
"stringArray": {
"type": "array",
"items": {"type": "string"},
# "minItems": 1,
"uniqueItems": True
},
"property": {
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["title", "type"],
"properties": {
"name": {
"type": "string"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"array",
"boolean",
"integer",
"number",
"object",
"string",
None
]
},
"required": {
"$ref": "#/definitions/stringArray"
},
"minimum": {
"type": "number"
},
"maximum": {
"type": "number"
},
"maxLength": {
"$ref": "#/definitions/positiveInteger"
},
"minLength": {
"$ref": "#/definitions/positiveIntegerDefault0"
},
"pattern": {
"type": "string",
"format": "regex"
},
"enum": {
"type": "array"
},
"readonly": {
"type": "boolean"
},
"default": {},
"items": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": [
"array",
"boolean",
"integer",
"number",
"object",
"string",
None
]
},
"enum": {
"type": "array"
}
}
},
"maxItems": {
"$ref": "#/definitions/positiveInteger"
},
"minItems": {
"$ref": "#/definitions/positiveIntegerDefault0"
},
"uniqueItems": {
"type": "boolean",
"default": False
},
"additionalItems": {
"type": "boolean"
},
}
}
}
}
def _get_base_properties():
return {
"namespace": {
"type": "string",
"description": _("The unique namespace text."),
"maxLength": 80,
},
"display_name": {
"type": "string",
"description": _("The user friendly name for the namespace. Used "
"by UI if available."),
"maxLength": 80,
},
"description": {
"type": "string",
"description": _("Provides a user friendly description of the "
"namespace."),
"maxLength": 500,
},
"visibility": {
"type": "string",
"description": _("Scope of namespace accessibility."),
"enum": ["public", "private"],
},
"protected": {
"type": "boolean",
"description": _("If true, namespace will not be deletable."),
},
"owner": {
"type": "string",
"description": _("Owner of the namespace."),
"maxLength": 255,
},
"created_at": {
"type": "string",
"description": _("Date and time of namespace creation"
" (READ-ONLY)"),
"format": "date-time"
},
"updated_at": {
"type": "string",
"description": _("Date and time of the last namespace modification"
" (READ-ONLY)"),
"format": "date-time"
},
"schema": {
"type": "string"
},
"self": {
"type": "string"
},
"resource_type_associations": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"prefix": {
"type": "string"
},
"properties_target": {
"type": "string"
}
}
}
},
"properties": {
"$ref": "#/definitions/property"
},
"objects": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"description": {
"type": "string"
},
"required": {
"$ref": "#/definitions/stringArray"
},
"properties": {
"$ref": "#/definitions/property"
},
}
}
}
}
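# Illustrative request body accepted by the schema built from the properties
# above (names and values are hypothetical, not taken from the original project):
#   {
#       "namespace": "OS::Compute::ExampleNamespace",
#       "display_name": "Example Namespace",
#       "description": "Hypothetical namespace used to illustrate the schema.",
#       "visibility": "public",
#       "protected": false,
#       "properties": {
#           "example_prop": {"title": "Example property", "type": "string"}
#       }
#   }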
def get_schema():
properties = _get_base_properties()
definitions = _get_base_definitions()
mandatory_attrs = Namespace.get_mandatory_attrs()
schema = glance.schema.Schema(
'namespace',
properties,
required=mandatory_attrs,
definitions=definitions
)
return schema
def get_collection_schema():
namespace_schema = get_schema()
return glance.schema.CollectionSchema('namespaces', namespace_schema)
def get_namespace_href(namespace):
base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace
return base_href
def get_object_href(namespace_name, metadef_object):
base_href = ('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadef_object.name))
return base_href
def create_resource():
"""Namespaces resource factory method"""
schema = get_schema()
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = NamespaceController()
return wsgi.Resource(controller, deserializer, serializer)
| apache-2.0 | 8,594,759,101,912,330,000 | 36.983827 | 79 | 0.532749 | false |
taufique71/widespace-job-manager | graph.py | 1 | 1340 | class Graph:
def __init__(self, config):
self.config = config
self.vertices = list(config["jobs"].keys())
self.edges = []
self.outdegree = {}
for key, value in config["dependencies"].items():
for val in value:
self.edges.append( (key, val) )
self.update_outdegree()
return
def get_vertices(self):
return self.vertices
def get_edges(self):
return self.edges
def get_outdegree(self):
return self.outdegree
def get_zero_outdegree_vertices(self):
zero_outdegree_vertices = []
for v in self.vertices:
if self.outdegree[v] == 0:
zero_outdegree_vertices.append(v)
return zero_outdegree_vertices
def update_outdegree(self):
self.outdegree = {}
for v in self.vertices:
self.outdegree[v] = 0
for e in self.edges:
self.outdegree[e[0]] = self.outdegree[e[0]]+1
return
def remove_edge(self, edge):
self.edges.remove( edge )
self.update_outdegree()
return
def remove_vertex(self, vertex):
self.vertices.remove(vertex)
for e in list(self.edges):
if e[1] == vertex:
self.remove_edge(e)
self.update_outdegree()
return
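# Minimal usage sketch (hypothetical config; Graph only reads the "jobs" keys
# and the "dependencies" mapping of job -> list of jobs it depends on):
if __name__ == "__main__":
    sample_config = {
        "jobs": {"a": {}, "b": {}, "c": {}},
        "dependencies": {"b": ["a"], "c": ["a", "b"]},
    }
    g = Graph(sample_config)
    # Jobs with no outstanding dependencies have outdegree 0 and can run first.
    print(g.get_zero_outdegree_vertices())  # -> ['a']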
| mit | 6,338,988,099,701,754,000 | 26.916667 | 57 | 0.551493 | false |
mhgp/convert_txts2lyx4shosetsukaninaro | cnv.py | 1 | 5770 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
import io
import re
import codecs
# Directory name of the current working directory
DIRECTORY_NAME = os.path.split(os.getcwd())[1]
#
def write_base_lyx_code(wf):
wf.write("""#LyX 2.2 created this file. For more info see http://www.lyx.org/
\\lyxformat 508
\\begin_document
\\begin_header
\\save_transient_properties true
\\origin unavailable
\\textclass jsbook
\\begin_preamble
\\usepackage{pxrubrica}
\\usepackage[dvipdfmx, bookmarkstype=toc, colorlinks=true, urlcolor=black, linkcolor=blue, citecolor=black, linktocpage=true, bookmarks=true]{hyperref}
\\usepackage{pxjahyper}
\\end_preamble
\\use_default_options true
\\maintain_unincluded_children false
\\language japanese
\\language_package default
\\inputencoding utf8-platex
\\fontencoding global
\\font_roman "default" "default"
\\font_sans "default" "default"
\\font_typewriter "default" "default"
\\font_math "auto" "auto"
\\font_default_family default
\\use_non_tex_fonts false
\\font_sc false
\\font_osf false
\\font_sf_scale 100 100
\\font_tt_scale 100 100
\\graphics default
\\default_output_format default
\\output_sync 0
\\bibtex_command default
\\index_command default
\\paperfontsize default
\\spacing single
\\use_hyperref false
\\pdf_bookmarks true
\\pdf_bookmarksnumbered true
\\pdf_bookmarksopen false
\\pdf_bookmarksopenlevel 1
\\pdf_breaklinks false
\\pdf_pdfborder false
\\pdf_colorlinks false
\\pdf_backref false
\\pdf_pdfusetitle true
\\papersize default
\\use_geometry false
\\use_package amsmath 1
\\use_package amssymb 1
\\use_package cancel 1
\\use_package esint 1
\\use_package mathdots 1
\\use_package mathtools 1
\\use_package mhchem 1
\\use_package stackrel 1
\\use_package stmaryrd 1
\\use_package undertilde 1
\\cite_engine basic
\\cite_engine_type default
\\biblio_style plain
\\use_bibtopic false
\\use_indices false
\\paperorientation portrait
\\suppress_date false
\\justification true
\\use_refstyle 1
\\index Index
\\shortcut idx
\\color #008000
\\end_index
\\secnumdepth -2
\\tocdepth 2
\\paragraph_separation indent
\\paragraph_indentation default
\\quotes_language english
\\papercolumns 1
\\papersides 1
\\paperpagestyle default
\\tracking_changes false
\\output_changes false
\\html_math_output 0
\\html_css_as_file 0
\\html_be_strict false
\\end_header
\\begin_body\n""")
wf.write("\\begin_layout Title\n" + DIRECTORY_NAME + "\n\\end_layout\n")
wf.write("""\\begin_layout Standard
\\begin_inset CommandInset toc
LatexCommand tableofcontents
\\end_inset
\\end_layout\n\n""")
write_body(wf)
wf.write("""\\end_body
\\end_document""")
# Write the body text
def write_body(wf):
count = 0
while True:
count += 1
path = DIRECTORY_NAME + "-" + str(count) + ".txt";
if not os.path.exists(path):
break
txt2lyx(wf, path)
# Write a horizontal rule
def write_horizon(wf):
wf.write("""\\begin_layout Standard
\\begin_inset CommandInset line
LatexCommand rule
offset "0.5ex"
width "100col%"
height "1pt"
\\end_inset
\\end_layout\n""")
# Write a ruby (furigana) annotation
def write_ruby(wf, body, ruby):
wf.write("""\\begin_inset ERT
status open
\\begin_layout Plain Layout
\\backslash\n""")
wf.write("ruby[g]{" + body + "}{" + ruby + "}\n")
wf.write("""\\end_layout
\\end_inset""")
#
def write_line(wf, line):
wf.write("%s\n"%line)
#
def write_text(wf, line, bl_count):
    # Handle blank lines
if (not line) or re.match(r"^[\s\u3000]+$", line):
bl_count += 1
return bl_count
if bl_count > 0:
wf.write("\\begin_layout Standard\n")
for i in range(0, bl_count):
wf.write("\\begin_inset VSpace defskip\n")
wf.write("\\end_inset\n")
wf.write("\\end_layout\n")
bl_count = 0
    # Write the paragraph
if line.startswith(' '):
        #-- paragraph (with indentation)
wf.write("\\begin_layout Standard\n")
write_line(wf, line[1:])
wf.write("\\end_layout\n")
else:
        #-- paragraph (without indentation)
wf.write("\\begin_layout Standard\n\\noindent\n")
write_line(wf, line)
wf.write("\\end_layout\n")
wf.write("\n")
return bl_count
#
def txt2lyx(wf, path):
line_num = 0
with codecs.open(path, 'r', encoding='utf-8') as f:
lines = re.split('\r\n|\r|\n', f.read())
preface_end_line = 0
for i,line in enumerate(lines):
if line == "********************************************":
preface_end_line = i
break
#Chapter Title
if preface_end_line > 0:
line_num = preface_end_line + 1
wf.write("\\begin_layout Chapter\n")
wf.write("%s\n"%lines[line_num])
wf.write("\\end_layout\n")
wf.write("\n")
        # Preface
bl_count = 0
for line_num in range(0, preface_end_line):
line = lines[line_num]
bl_count = write_text(wf, line, bl_count)
if preface_end_line > 0:
write_horizon(wf)
        # Body text and afterword
bl_count = 0
is_start = True
for line in lines[preface_end_line + 2:]:
            # Afterword
if line == "************************************************":
bl_count = 0
write_horizon(wf)
continue
            # Body text
bl_count = write_text(wf, line, bl_count)
if is_start:
if bl_count > 0:
bl_count = 0
else:
is_start = False
# main
with io.open(DIRECTORY_NAME + '.lyx', mode='w', encoding='utf-8', newline='\n') as f:
write_base_lyx_code(f)
| mit | 8,881,267,687,739,330,000 | 24.156951 | 151 | 0.604278 | false |
moyaproject/moya | moya/namespaces.py | 1 | 3357 | from __future__ import unicode_literals
"""XML namespaces"""
admin = "http://moyaproject.com/admin"
auth = "http://moyaproject.com/auth"
blog = "http://moyaproject.com/blog"
comments = "http://moyaproject.com/comments"
db = "http://moyaproject.com/db"
default = "http://moyaproject.com"
email = "http://moyaproject.com/email"
feedback = "http://moyaproject.com/feedback"
forms = "http://moyaproject.com/forms"
fs = "http://moyaproject.com/fs"
html = "http://moyaproject.com/html"
image = "http://moyaproject.com/image"
jsonrpc = "http://moyaproject.com/jsonrpc"
let = "http://moyaproject.com/let"
links = "http://moyaproject.com/links"
preflight = "http://moyaproject.com/preflight"
recaptcha = "http://moyaproject.com/recaptcha"
run = default
soup = "http://moyaproject.com/soup"
tables = "http://moyaproject.com/tables"
test = "http://moyaproject.com/test"
thumbnail = "http://moyaproject.com/thumbnail"
widgets = "http://moyaproject.com/widgets"
wysihtml5 = "http://moyaproject.com/wysihtml5"
namespace_docs = {
"http://moyaproject.com/admin": """
Tags defined in the [link admin]Moya Admin[/link] library.
""",
"http://moyaproject.com/auth": """
Tags defined in [link auth]Moya Auth[/link] library.
""",
"http://moyaproject.com/blog": """
Tags defined in the [link blog]Moya Blog[/link] library.
""",
"http://moyaproject.com/comments": """
Tags defined in the [link comments]Moya Comments[/link] library.
""",
"http://moyaproject.com/db": """
Tags used to work with [link db]databases[/link].
""",
"http://moyaproject.com": """
The default namespace used for most of Moya's tags.
""",
"http://moyaproject.com/email": """
Tags related to [link email]email[/link].
""",
"http://moyaproject.com/feedback": """
    Tags defined in [link feedback]Moya Feedback[/link].
""",
"http://moyaproject.com/forms": """
Tags defined in [link forms]Moya Forms[/link].
""",
"http://moyaproject.com/fs": """
Tags for working with [link project#filesystems]filesystems[/link].
""",
"http://moyaproject.com/image": """
Tags for working with [link images]images[/link].
""",
"http://moyaproject.com/jsonrpc": """
Tags for creating [link jsonrpc]JSON RPC[/link] interfaces.
""",
"http://moyaproject.com/links": """
Tags for defining [link links]links[/link].
""",
"http://moyaproject.com/preflight": """
Tags for creating [link preflight]preflight checks[/link].
""",
"http://moyaproject.com/recaptcha": """
Tags defined in [link recaptcha]Moya Google Recaptcha[/link].
""",
"http://moyaproject.com/soup": """
Tags for processing HTML tags.
""",
"http://moyaproject.com/tables": """
Tags used to create [link tables]table[/link] content.
""",
"http://moyaproject.com/test": """
Tags to build unit tests.
""",
"http://moyaproject.com/thumbnail": """
Tags defined in the [link thumbnail]Moya Thumbnail[/link] library.
""",
"http://moyaproject.com/widgets": """
Widgets defined in [link widgets]Moya Widgets[/link].
""",
"http://moyaproject.com/wysihtml5": """
Tags to create a rich text editor with [link wysihtml5]WYSIHTML5[/link].
""",
}
| mit | 2,579,244,657,551,941,000 | 33.608247 | 80 | 0.626154 | false |
rspeer/csc-pysparse | examples/pysparse_test.py | 1 | 8278 | import math, os, sys, time
import numpy as Numeric
from pysparse import spmatrix
from pysparse import itsolvers
from pysparse import precon
ll = spmatrix.ll_mat(5,5)
print ll
print ll[1,1]
print ll
ll[2,1] = 1.0
ll[1,3] = 2.0
print ll
print ll.to_csr()
print ll[1,3]
print ll[1,-1]
print ll.nnz
ll.export_mtx('test.mtx')
L = spmatrix.ll_mat(10, 10)
for i in range(0, 10):
L[i,i] = float(i+1)
A = L.to_csr()
x = Numeric.ones([10], 'd')
y = Numeric.zeros([10], 'd')
print A, x, y
A.matvec(x, y)
print y
ll = spmatrix.ll_mat(100, 100)
for i in range(0, 100, 5):
for j in range(0, 100, 4):
ll[i,j] = 1.0/float(i+j+1)
A = ll.to_csr()
x = Numeric.arange(100).astype(Numeric.float)
y = Numeric.zeros(100, 'd')
z = Numeric.zeros(100, 'd')
A.matvec(x, y)
print y
print 'norm(y) = ', math.sqrt(Numeric.add.reduce(y))
##A.matvec_transp(x, z)
##print z
##print 'norm(z) = ', math.sqrt(Numeric.add.reduce(z))
L = spmatrix.ll_mat(10,10)
for i in range(10):
L[i,i] = float(i+1)
A = L.to_csr()
print A
x = Numeric.zeros(10, 'd')
b = Numeric.ones(10, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-8, 100)
print info, iter, relres
print x
if (info != 0):
print >> sys.stderr, 'cg not converged'
L2 = L.copy()
x = Numeric.zeros(10, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-8, 100)
print info, iter, relres
# -----------------------------------------------------------
print 'remove test'
n = 100
L = spmatrix.ll_mat(n, n)
for run in range(5):
print 'adding elements...'
for i in range(0,n,2):
for j in range (n):
L[i,j] = i+j+1
# print L
print L.nnz
print 'removing elements...'
for j in range(0,n,2):
for i in range (n):
L[i,j] = 0.0
# print L
print L.nnz
# -----------------------------------------------------------
print 'submatrix test'
n = 100
L = spmatrix.ll_mat(n, n)
for i in range (0, n, 2):
for j in range (1, n, 2):
L[i,j] = float(n*i + j);
print L[10:18,75:80]
print L[10:15,35:10]
print L[19:15,35:10]
# -----------------------------------------------------------
print 'submatrix assign test'
n = 10
L = spmatrix.ll_mat(n, n);
for i in range (0, n, 1):
for j in range (0, n, 1):
L[i,j] = 1.0;
print L
Z = spmatrix.ll_mat(n-2, n-2)
L[1:n-1,1:n-1] = Z
print L
print L.nnz
#------------------------------------------------------------
if 0:
f = open(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t1 = time.clock()
L = ll_mat_from_mtx(f)
t_read = time.clock() - t1
f.close()
print 'time for reading matrix data from file: %.2f sec' % t_read
if 1:
t1 = time.clock()
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/poi2d_300.mtx')
t_read = time.clock() - t1
print 'time for reading matrix data from file: %.2f sec' % t_read
#------------------------------------------------------------
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/node4x3x1_A.mtx')
print L.shape, L.nnz
A = L.to_sss()
class diag_prec:
def __init__(self, A):
self.shape = A.shape
n = self.shape[0]
self.dinv = Numeric.zeros(n, 'd')
for i in xrange(n):
self.dinv[i] = 1.0 / A[i,i]
def precon(self, x, y):
Numeric.multiply(x, self.dinv, y)
def resid(A, b, x):
r = x.copy()
A.matvec(x, r)
r = b - r
return math.sqrt(Numeric.dot(r, r))
K_diag = diag_prec(A)
K_jac = precon.jacobi(A, 1.0, 1)
K_ssor = precon.ssor(A, 1.0, 1)
# K_ilu = precon.ilutp(L)
n = L.shape[0];
b = Numeric.arange(n).astype(Numeric.float)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000)
print 'pcg, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_diag)
print 'pcg, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_jac)
print 'pcg, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.pcg(A, b, x, 1e-6, 1000, K_ssor)
print 'pcg, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000)
print 'minres, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_diag)
print 'minres, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_jac)
print 'minres, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.minres(A, b, x, 1e-6, 1000, K_ssor)
print 'minres, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000)
print 'qmrs, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_diag)
print 'qmrs, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_jac)
print 'qmrs, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.qmrs(A, b, x, 1e-6, 1000, K_ssor)
print 'qmrs, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000)
print 'cgs, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_diag)
print 'cgs, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_jac)
print 'cgs, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.cgs(A, b, x, 1e-6, 1000, K_ssor)
print 'cgs, K_ssor: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000)
print 'bicgstab, K_none: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_diag)
print 'bicgstab, K_diag: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_jac)
print 'bicgstab, K_jac: ', info, iter, relres, resid(A, b, x)
x = Numeric.zeros(n, 'd')
info, iter, relres = itsolvers.bicgstab(A, b, x, 1e-6, 1000, K_ssor)
print 'bicgstab, K_ssor: ', info, iter, relres, resid(A, b, x)
#------------------------------------------------------------
import superlu
L = spmatrix.ll_mat_from_mtx(os.environ['HOME']+'/matrices/cop18_el3_A.mtx')
##f = open('cop18_el5_A.mtx')
##L = ll_mat_from_mtx(f)
##f.close()
n11 = 4688
L = L[0:n11, 0:n11] # extract (1,1)-block
# make matrix regular
for i in xrange(n11):
L[i,i] = 1
print L.shape, L.nnz
n = L.shape[0]
B = L.to_csr()
su = superlu.factorize(B, diag_pivot_thresh=0.0)
print su.nnz
b = Numeric.arange(n).astype(Numeric.float) / n
x = Numeric.zeros(n, 'd')
su.solve(b, x)
print 'norm(b) = %g' % math.sqrt(Numeric.dot(b, b))
print 'norm(x) = %g' % math.sqrt(Numeric.dot(x, x))
r = Numeric.zeros(n, 'd')
B.matvec(x, r)
r = b - r
print 'norm(b - A*x) = %g' % math.sqrt(Numeric.dot(r, r))
if 1:
for panel_size in [5, 10, 15]:
for relax in [1, 3, 5]:
for permc_spec in [0, 1, 2]:
for diag_pivot_thresh in [0.0, 0.5, 1.0]:
t1 = time.clock()
su = superlu.factorize(B,
panel_size=panel_size,
relax=relax,
permc_spec=permc_spec,
diag_pivot_thresh=diag_pivot_thresh)
t_fact = time.clock() - t1
t1 = time.clock()
su.solve(b, x)
t_solve = time.clock() - t1
print 'panel_size=%2d, relax=%d, permc_spec=%d, diag_pivot_thresh=%.1f nnz=%d, t_fact=%.2f, t_solve=%.2f' % \
(panel_size, relax, permc_spec, diag_pivot_thresh, su.nnz, t_fact, t_solve)
| bsd-2-clause | -5,013,317,756,455,400,000 | 28.35461 | 131 | 0.550374 | false |
CivicKnowledge/ambry-ui | ambry_ui/session.py | 1 | 1804 | # Stolen from: http://flask.pocoo.org/snippets/51/
from werkzeug.datastructures import CallbackDict
from flask.sessions import SessionInterface, SessionMixin
from itsdangerous import URLSafeTimedSerializer, BadSignature
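# Usage sketch (not part of the original snippet; assumes a Flask app object):
#   app = Flask(__name__)
#   app.secret_key = 'change-me'
#   app.session_interface = ItsdangerousSessionInterface()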
class ItsdangerousSession(CallbackDict, SessionMixin):
def __init__(self, initial=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.modified = False
class ItsdangerousSessionInterface(SessionInterface):
salt = 'cookie-session'
session_class = ItsdangerousSession
def get_serializer(self, app):
if not app.secret_key:
return None
return URLSafeTimedSerializer(app.secret_key,
salt=self.salt)
def open_session(self, app, request):
s = self.get_serializer(app)
if s is None:
return None
val = request.cookies.get(app.session_cookie_name)
if not val:
return self.session_class()
max_age = app.permanent_session_lifetime.total_seconds()
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name,domain=domain)
return
expires = self.get_expiration_time(app, session)
val = self.get_serializer(app).dumps(dict(session))
response.set_cookie(app.session_cookie_name, val,
expires=expires, httponly=True,
domain=domain) | bsd-3-clause | -1,761,448,075,511,564,300 | 33.056604 | 77 | 0.622506 | false |
google-research/ssl_detection | third_party/FasterRCNN/FasterRCNN/modeling/model_box.py | 1 | 7199 | # -*- coding: utf-8 -*-
# File: model_box.py
import numpy as np
import tensorflow as tf
from collections import namedtuple
from tensorpack.tfutils.scope_utils import under_name_scope
from config import config
@under_name_scope()
def clip_boxes(boxes, window, name=None):
"""
Args:
boxes: nx4, xyxy
window: [h, w]
"""
boxes = tf.maximum(boxes, 0.0)
m = tf.tile(tf.reverse(window, [0]), [2]) # (4,)
boxes = tf.minimum(boxes, tf.cast(m, tf.float32), name=name)
return boxes
@under_name_scope()
def decode_bbox_target(box_predictions, anchors):
"""
Args:
box_predictions: (..., 4), logits
anchors: (..., 4), floatbox. Must have the same shape
Returns:
box_decoded: (..., 4), float32. With the same shape.
"""
orig_shape = tf.shape(anchors)
box_pred_txtytwth = tf.reshape(box_predictions, (-1, 2, 2))
box_pred_txty, box_pred_twth = tf.split(box_pred_txtytwth, 2, axis=1)
# each is (...)x1x2
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
clip = np.log(config.PREPROC.MAX_SIZE / 16.)
wbhb = tf.exp(tf.minimum(box_pred_twth, clip)) * waha
xbyb = box_pred_txty * waha + xaya
x1y1 = xbyb - wbhb * 0.5
x2y2 = xbyb + wbhb * 0.5 # (...)x1x2
out = tf.concat([x1y1, x2y2], axis=-2)
return tf.reshape(out, orig_shape)
@under_name_scope()
def encode_bbox_target(boxes, anchors):
"""
Args:
boxes: (..., 4), float32
anchors: (..., 4), float32
Returns:
box_encoded: (..., 4), float32 with the same shape.
"""
anchors_x1y1x2y2 = tf.reshape(anchors, (-1, 2, 2))
anchors_x1y1, anchors_x2y2 = tf.split(anchors_x1y1x2y2, 2, axis=1)
waha = anchors_x2y2 - anchors_x1y1
xaya = (anchors_x2y2 + anchors_x1y1) * 0.5
boxes_x1y1x2y2 = tf.reshape(boxes, (-1, 2, 2))
boxes_x1y1, boxes_x2y2 = tf.split(boxes_x1y1x2y2, 2, axis=1)
wbhb = boxes_x2y2 - boxes_x1y1
xbyb = (boxes_x2y2 + boxes_x1y1) * 0.5
# Note that here not all boxes are valid. Some may be zero
txty = (xbyb - xaya) / waha
twth = tf.log(wbhb / waha) # may contain -inf for invalid boxes
encoded = tf.concat([txty, twth], axis=1) # (-1x2x2)
return tf.reshape(encoded, tf.shape(boxes))
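# Illustrative consistency check (not part of the original file): for ordinary
# boxes, encoding against a set of anchors and then decoding the result should
# reproduce the input up to floating point error, e.g.
#   deltas = encode_bbox_target(gt_boxes, anchors)
#   recovered = decode_bbox_target(deltas, anchors)   # approximately gt_boxes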
@under_name_scope()
def crop_and_resize(image, boxes, box_ind, crop_size, pad_border=True):
"""
Aligned version of tf.image.crop_and_resize, following our definition of
floating point boxes.
Args:
image: NCHW
boxes: nx4, x1y1x2y2
box_ind: (n,)
crop_size (int):
Returns:
n,C,size,size
"""
assert isinstance(crop_size, int), crop_size
boxes = tf.stop_gradient(boxes)
# TF's crop_and_resize produces zeros on border
if pad_border:
# this can be quite slow
image = tf.pad(image, [[0, 0], [0, 0], [1, 1], [1, 1]], mode='SYMMETRIC')
boxes = boxes + 1
@under_name_scope()
def transform_fpcoor_for_tf(boxes, image_shape, crop_shape):
"""
The way tf.image.crop_and_resize works (with normalized box):
Initial point (the value of output[0]): x0_box * (W_img - 1)
Spacing: w_box * (W_img - 1) / (W_crop - 1)
Use the above grid to bilinear sample.
However, what we want is (with fpcoor box):
Spacing: w_box / W_crop
Initial point: x0_box + spacing/2 - 0.5
(-0.5 because bilinear sample (in my definition) assumes floating point
coordinate
(0.0, 0.0) is the same as pixel value (0, 0))
This function transform fpcoor boxes to a format to be used by
tf.image.crop_and_resize
Returns:
y1x1y2x2
"""
x0, y0, x1, y1 = tf.split(boxes, 4, axis=1)
spacing_w = (x1 - x0) / tf.cast(crop_shape[1], tf.float32)
spacing_h = (y1 - y0) / tf.cast(crop_shape[0], tf.float32)
imshape = [
tf.cast(image_shape[0] - 1, tf.float32),
tf.cast(image_shape[1] - 1, tf.float32)
]
nx0 = (x0 + spacing_w / 2 - 0.5) / imshape[1]
ny0 = (y0 + spacing_h / 2 - 0.5) / imshape[0]
nw = spacing_w * tf.cast(crop_shape[1] - 1, tf.float32) / imshape[1]
nh = spacing_h * tf.cast(crop_shape[0] - 1, tf.float32) / imshape[0]
return tf.concat([ny0, nx0, ny0 + nh, nx0 + nw], axis=1)
image_shape = tf.shape(image)[2:]
boxes = transform_fpcoor_for_tf(boxes, image_shape, [crop_size, crop_size])
image = tf.transpose(image, [0, 2, 3, 1]) # nhwc
ret = tf.image.crop_and_resize(
image,
boxes,
tf.cast(box_ind, tf.int32),
crop_size=[crop_size, crop_size])
ret = tf.transpose(ret, [0, 3, 1, 2]) # ncss
return ret
@under_name_scope()
def roi_align(featuremap, boxes, resolution):
"""
Args:
featuremap: 1xCxHxW
boxes: Nx4 floatbox
resolution: output spatial resolution
Returns:
NxCx res x res
"""
# sample 4 locations per roi bin
ret = crop_and_resize(featuremap, boxes,
tf.zeros([tf.shape(boxes)[0]], dtype=tf.int32),
resolution * 2)
try:
avgpool = tf.nn.avg_pool2d
except AttributeError:
avgpool = tf.nn.avg_pool
ret = avgpool(
ret, [1, 1, 2, 2], [1, 1, 2, 2], padding='SAME', data_format='NCHW')
return ret
class RPNAnchors(namedtuple('_RPNAnchors', ['boxes', 'gt_labels', 'gt_boxes'])):
"""
boxes (FS x FS x NA x 4): The anchor boxes.
gt_labels (FS x FS x NA):
gt_boxes (FS x FS x NA x 4): Groundtruth boxes corresponding to each anchor.
"""
def encoded_gt_boxes(self):
return encode_bbox_target(self.gt_boxes, self.boxes)
def decode_logits(self, logits):
return decode_bbox_target(logits, self.boxes)
@under_name_scope()
def narrow_to(self, featuremap):
"""
Slice anchors to the spatial size of this featuremap.
"""
shape2d = tf.shape(featuremap)[2:] # h,w
slice3d = tf.concat([shape2d, [-1]], axis=0)
slice4d = tf.concat([shape2d, [-1, -1]], axis=0)
boxes = tf.slice(self.boxes, [0, 0, 0, 0], slice4d)
gt_labels = tf.slice(self.gt_labels, [0, 0, 0], slice3d)
gt_boxes = tf.slice(self.gt_boxes, [0, 0, 0, 0], slice4d)
return RPNAnchors(boxes, gt_labels, gt_boxes)
if __name__ == '__main__':
"""
Demonstrate what's wrong with tf.image.crop_and_resize.
Also reported at https://github.com/tensorflow/tensorflow/issues/26278
"""
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
# want to crop 2x2 out of a 5x5 image, and resize to 4x4
image = np.arange(25).astype('float32').reshape(5, 5)
boxes = np.asarray([[1, 1, 3, 3]], dtype='float32')
target = 4
print(crop_and_resize(image[None, None, :, :], boxes, [0], target)[0][0])
"""
Expected values:
4.5 5 5.5 6
7 7.5 8 8.5
9.5 10 10.5 11
12 12.5 13 13.5
You cannot easily get the above results with tf.image.crop_and_resize.
Try out yourself here:
"""
print(
tf.image.crop_and_resize(image[None, :, :, None],
np.asarray([[1, 1, 2, 2]]) / 4.0, [0],
[target, target])[0][:, :, 0])
| apache-2.0 | 4,270,145,771,473,847,000 | 29.634043 | 80 | 0.600222 | false |
christiankaiser/spatial-tools | src/v.to.raster/v.to.raster.py | 1 | 4911 | #!/usr/bin/env python
"""
Converts a vector layer to a raster
"""
__version__ = '1.0.0'
__date__ = '2011-03-24'
__author__ = 'Christian Kaiser <[email protected]>'
import commands
import math
import osgeo.ogr as ogr
import osgeo.gdal as gdal
import osgeo.osr as osr
from optparse import OptionParser
import sys
USAGE = """v.to.raster
--input INPUT_LAYER --output OUTPUT_RASTER --attr ATTR
[--res RESOLUTION] [--size RASTERX,RASTERY]
[--envelope XMIN,YMIN,XMAX,YMAX]
"""
def vector_to_raster(ogrfile, attribute, rasterfile, rastersize=None,
res=None, minx=None, maxx=None, miny=None, maxy=None):
"""
Transforms an OGR compatible vector layer into a raster layer in HFA
format. The value of the provided attribute is used as value for the
raster. This function is based on gdal_rasterize, so Python needs access
to this tool.
"""
#print("vector_to_raster: opening %s" % ogrfile)
# Open the vector file
inds = ogr.Open(ogrfile)
if inds is None:
raise Exception("Unable to open %s\n" % ogrfile)
# Check if there is at least one layer in the OGR datasource.
nlyrs = inds.GetLayerCount()
if nlyrs < 1:
raise Exception("Data source %s does not have any layer.\n" % ogrfile)
# Get the layer from the vector file
#lyrname = os.path.splitext(os.path.basename(ogrfile))[0]
#try:
# lyr = inds.GetLayerByName(lyrname)
#except:
lyr = inds.GetLayer(0)
lyrname = lyr.GetLayerDefn().GetName()
if lyr == None:
raise Exception("Unable to open OGR layer in %s\n" % ogrfile)
# We have to create a new raster dataset first.
# Determine the extent of the vector file if the extent is not provided.
if minx == None or maxx == None or miny == None or maxy == None:
extent = lyr.GetExtent()
if minx == None: minx = extent[0]
if maxx == None: maxx = extent[1]
if miny == None: miny = extent[2]
if maxy == None: maxy = extent[3]
if minx > maxx:
minx = extent[0]
maxx = extent[1]
if miny > maxy:
miny = extent[2]
maxy = extent[3]
# Compute the resolution if not provided
if res is None:
xres = (maxx - minx) / rastersize[0]
yres = (maxy - miny) / rastersize[1]
res = xres
if yres > xres: res = yres
# Adjust the raster size to fit the extent proportions
sizex = int(math.ceil((maxx - minx) / res))
sizey = int(math.ceil((maxy - miny) / res))
# Create a new raster layer
rasterDriver = gdal.GetDriverByName('HFA')
outds = rasterDriver.Create(rasterfile, sizex, sizey, 1, gdal.GDT_Float64)
rasterTransform = [minx, res, 0.0, maxy, 0.0, -res]
outds.SetGeoTransform(rasterTransform)
# Get projection of OGR file and assign to raster
srs = osr.SpatialReference()
srs.ImportFromWkt(lyr.GetSpatialRef().__str__())
outds.SetProjection(srs.ExportToWkt())
# Close the vector and raster files.
inds = None
outds = None
# Execute gdal_rasterize
commandString = "gdal_rasterize -a %s -l %s %s %s" % (attribute, lyrname, ogrfile, rasterfile)
commandOutput = commands.getoutput(commandString)
if __name__ == "__main__":
parser = OptionParser(usage=USAGE)
parser.add_option(
'-i', '--input', dest="input",
help="OGR compatible input vector layer",
metavar="INPUT_LAYER"
)
parser.add_option(
'-o', '--output', dest="output",
help="Path to output raster file",
metavar="OUTPUT_RASTER"
)
parser.add_option(
'-a', '--attr', dest="attr",
help="Attribute name containing the pixel value",
metavar="ATTR"
)
parser.add_option(
'-r', '--res', dest="res",
help="Raster pixel size (image resolution)"
)
parser.add_option(
'-s', '--size', dest="size",
help="Raster size"
)
parser.add_option(
'-e', '--env', dest="env",
help="Bounding box"
)
(options, args) = parser.parse_args()
if options.size != None:
size = map(int, options.size.split(','))
else:
size = None
if options.res != None:
res = float(options.res)
else:
res = None
if options.env != None:
xmin, ymin, xmax, ymax = map(float, options.env.split(','))
else:
xmin = ymin = xmax = ymax = None
if options.input == None or options.output == None or options.attr == None:
print USAGE
sys.exit(0)
print("v.to.raster starting...")
vector_to_raster(
ogrfile = options.input,
attribute = options.attr,
rasterfile = options.output,
rastersize = size,
res = res,
minx = xmin, maxx = xmax, miny = ymin, maxy = ymax
)
print("v.to.raster done")
| gpl-3.0 | -648,093,374,068,651,400 | 29.314815 | 98 | 0.59438 | false |
nfqsolutions/pylm | setup.py | 1 | 2731 | #!/usr/bin/env python
from setuptools import setup
__version__ = None
with open('pylm/__init__.py') as f:
exec(f.read())
long_description = """
Pylm
====
Pylm is the Python implementation of PALM, a framework to build
clusters of high performance components. It is presented at two
different levels of abstraction. In the high level API you will find
servers and clients that are functional *out of the box*. Use the high
level API if you are interested in simple communication patterns like
client-server, master-slave or a streaming pipeline. In the low level
API there are a variety of small components that, once combined,
can be used to implement almost any kind of
component. It's what the high level API uses under the hood. Choose
the low level API if you are interested in creating your custom
component and your custom communication pattern.
**Pylm requires a version of Python equal or higher than 3.4, and it is
more thoroughly tested with Python 3.5.**
Installing **pylm** is as easy as:
.. code-block:: bash
$> pip install pylm
* `PYPI package page <https://pypi.python.org/pypi/pylm/>`_
* `Documentation <http://pylm.readthedocs.io/en/latest/>`_
* `Source code <https://github.com/nfqsolutions/pylm>`_
Pylm is released under a dual licensing scheme. The source is released
as-is under the the AGPL version 3 license, a copy of the license is
included with the source. If this license does not suit you,
you can purchase a commercial license from `NFQ Solutions
<http://nfqsolutions.com>`_
This project has been funded by the Spanish Ministry of Economy and
Competitivity under the grant IDI-20150936, cofinanced with FEDER
funds.
"""
setup(name='pylm',
version=__version__,
description='A framework to build clusters of high performance components',
long_description=long_description,
author='Guillem Borrell',
author_email='[email protected]',
packages=['pylm',
'pylm.parts',
'pylm.persistence',
'pylm.remote'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Distributed Computing',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: GNU Affero General Public License v3'
],
setup_requires=['pytest-runner'],
install_requires=['protobuf>=3.0.0', 'requests', 'pyzmq']
)
| agpl-3.0 | -3,243,969,502,929,084,000 | 34.467532 | 81 | 0.682534 | false |
hubo1016/vlcp | vlcp/service/kvdb/objectdb.py | 1 | 63427 | '''
Created on 2016/3/24
:author: hubo
'''
from vlcp.config.config import defaultconfig
import vlcp.service.kvdb.storage as storage
import vlcp.service.kvdb.redisnotifier as redisnotifier
from vlcp.server.module import depend, Module, call_api, api
import vlcp.utils.dataobject as dataobj
from vlcp.event.runnable import RoutineContainer
from vlcp.event.event import Event, withIndices, M_
from time import time
from copy import deepcopy
from vlcp.event.core import QuitException, syscall_removequeue
import itertools
from vlcp.utils.dataobject import AlreadyExistsException, UniqueKeyReference,\
MultiKeyReference, DataObjectSet, UniqueKeySet, WeakReferenceObject,\
MultiKeySet, ReferenceObject, request_context
from contextlib import closing
import functools
import copy
from vlcp.utils.exceptions import AsyncTransactionLockException, StaleResultException,\
TransactionRetryExceededException, TransactionTimeoutException, WalkKeyNotRetrieved
try:
from itertools import izip
except ImportError:
izip = zip
@withIndices()
class RetrieveRequestSend(Event):
pass
@withIndices('id')
class RetrieveReply(Event):
pass
def _str(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
else:
return str(b)
def _str2(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
elif hasattr(b, 'getkey'):
return b.getkey()
else:
return str(b)
class _NeedMoreKeysException(Exception):
pass
@defaultconfig
@depend(storage.KVStorage, redisnotifier.UpdateNotifier)
class ObjectDB(Module):
"""
Abstract transaction layer for KVDB
"""
service = True
# Priority for object update event
_default_objectupdatepriority = 450
# Enable debugging mode for updater: all updaters will be called for an extra time
# to make sure it does not crash with multiple calls
_default_debuggingupdater = False
def __init__(self, server):
Module.__init__(self, server)
self._managed_objs = {}
self._watches = {}
self._requestids = {}
self._watchedkeys = set()
self._requests = []
self._transactno = 0
self._stale = False
self._updatekeys = set()
self._update_version = {}
self._cache = None
self._pending_gc = 0
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._update
self.routines.append(self.apiroutine)
self.create_api(api(self.mget, self.apiroutine),
api(self.get, self.apiroutine),
api(self.mgetonce, self.apiroutine),
api(self.getonce, self.apiroutine),
api(self.mwatch, self.apiroutine),
api(self.watch, self.apiroutine),
api(self.munwatch, self.apiroutine),
api(self.unwatch, self.apiroutine),
api(self.unwatchall, self.apiroutine),
api(self.transact, self.apiroutine),
api(self.watchlist),
api(self.walk, self.apiroutine),
api(self.gettimestamp, self.apiroutine),
api(self.asynctransact, self.apiroutine),
api(self.writewalk, self.apiroutine),
api(self.asyncwritewalk, self.apiroutine)
)
def _set_watch(self, key, requestid):
self._watches.setdefault(key, set()).add(requestid)
self._requestids.setdefault(requestid, set()).add(key)
def _remove_watch(self, key, requestid):
s = self._watches.get(key)
if s:
s.discard(requestid)
if not s:
del self._watches[key]
s = self._requestids.get(requestid)
if s:
s.discard(key)
if not s:
del self._requestids[requestid]
def _remove_all_watches(self, requestid):
s = self._requestids.get(requestid)
if s is not None:
for k in s:
s2 = self._watches.get(k)
if s2:
s2.discard(requestid)
if not s2:
del self._watches[k]
del self._requestids[requestid]
async def load(self, container):
self.scheduler.queue.addSubQueue(\
self.objectupdatepriority, dataobj.DataObjectUpdateEvent.createMatcher(), 'dataobjectupdate')
self._notifier = await call_api(container, 'updatenotifier', 'createnotifier')
await Module.load(self, container)
self.routines.append(self._notifier)
async def unload(self, container, force=False):
await container.syscall(syscall_removequeue(self.scheduler.queue, 'dataobjectupdate'))
await Module.unload(self, container, force=force)
async def _update(self):
timestamp = '%012x' % (int(time() * 1000),) + '-'
notification_matcher = self._notifier.notification_matcher(False)
def copywithkey(obj, key):
newobj = deepcopy(obj)
if hasattr(newobj, 'setkey'):
newobj.setkey(key)
return newobj
def getversion(obj):
if obj is None:
return (0, -1)
else:
return (getattr(obj, 'kvdb_createtime', 0), getattr(obj, 'kvdb_updateversion', 0))
def isnewer(obj, version):
if obj is None:
return version[1] != -1
else:
return getversion(obj) > version
request_matcher = RetrieveRequestSend.createMatcher()
def onupdate(event, matcher):
update_keys = self._watchedkeys.intersection([_str(k) for k in event.keys])
self._updatekeys.update(update_keys)
if event.extrainfo:
for k,v in zip(event.keys, event.extrainfo):
k = _str(k)
if k in update_keys:
v = tuple(v)
oldv = self._update_version.get(k, (0, -1))
if oldv < v:
self._update_version[k] = v
else:
for k in event.keys:
try:
del self._update_version[_str(k)]
except KeyError:
pass
async def updateinner():
processing_requests = []
# New managed keys
retrieve_list = set()
orig_retrieve_list = set()
retrieveonce_list = set()
orig_retrieveonce_list = set()
processing_request_ids = set()
# Retrieved values are stored in update_result before merging into current storage
update_result = {}
# key => [(walker_func, (original_keys, rid)), ...]
walkers = {}
# Use the loop count as a revision identifier, then the valid revisions of the value
# in update_result is a range, from the last loop count the value changed
# (or -1 if not changed), to the last loop count the value is retrieved
#
# each walker can only walk on keys that shares at least one revision to ensure the
# values are consistent. If no revision could be shared, all the keys must be retrieved
# again to get a consistent view
revision_min = {}
revision_max = {}
self._loopCount = 0
# A request-id -> retrieve set dictionary to store the saved keys
savelist = {}
# (start_key, walker_func, rid) => set(used_keys)
walker_used_keys = {}
# used_key => [(start_key, walker_func, (original_keys, rid)), ...]
used_key_ref = {}
def _update_walker_ref(start_key, walker, original_keys, rid, used_keys):
old_used_keys = walker_used_keys.get((start_key, walker, rid), ())
for k in old_used_keys:
if k not in used_keys:
old_list = used_key_ref[k]
for i, v in enumerate(old_list):
if v[0] == start_key and v[1] == walker and v[2][1] == rid:
break
else:
continue
old_list[i:] = old_list[i+1:]
for k in used_keys:
if k not in old_used_keys:
used_key_ref.setdefault(k, []).append((start_key, walker, (original_keys, rid)))
walker_used_keys[(start_key, walker, rid)] = set(used_keys)
# (start_key, walker, rid) => cached_result
finished_walkers = {}
def _dirty_walkers(new_values):
for k in new_values:
if k in used_key_ref:
for start_key, walker, (_, rid) in used_key_ref[k]:
finished_walkers.pop((start_key, walker, rid), None)
async def updateloop():
while (retrieve_list or self._updatekeys or self._requests):
# default walker, default walker cached, customized walker, customized walker cached
_performance_counters = [0, 0, 0, 0]
# Updated keys
update_list = set()
if self._loopCount >= 10 and not retrieve_list:
if not self._updatekeys:
break
elif self._loopCount >= 100:
# Too many updates, we must stop to respond
self._logger.warning("There are still database updates after 100 loops of mget, respond with potential inconsistent values")
break
if self._updatekeys:
update_list.update(self._updatekeys)
self._updatekeys.clear()
if self._requests:
# Processing requests
for r in self._requests:
if r[2] == 'unwatch':
try:
for k in r[0]:
self._remove_watch(k, r[3])
# Do not need to wait
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'unwatchall':
if r[3] in processing_request_ids:
# unwatch a processing request
# pend this request until all requests are processed
processing_requests.append(r)
else:
try:
self._remove_all_watches(r[3])
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'watch':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
for k in r[0]:
self._set_watch(k, r[3])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'get':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'walk':
retrieve_list.update(r[0])
processing_requests.append(r)
for k,v in r[3].items():
walkers.setdefault(k, []).append((v, (r[0], r[1])))
processing_request_ids.add(r[4])
else:
retrieveonce_list.update(r[0])
orig_retrieveonce_list.update(r[0])
processing_requests.append(r)
del self._requests[:]
if retrieve_list:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
# Add watch_keys to notification
if watch_keys:
for k in watch_keys:
if k in update_result:
self._update_version[k] = getversion(update_result[k])
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
get_list_set = update_list.union(itertools.chain((k for k in retrieve_list
if k not in update_result and k not in self._managed_objs),
(k for k in retrieveonce_list
if k not in update_result and k not in self._managed_objs)))
get_list = list(get_list_set)
new_values = set()
if get_list:
try:
result, self._cache = await call_api(
self.apiroutine,
'kvstorage',
'mgetwithcache',
{'keys': get_list, 'cache': self._cache}
)
except QuitException:
raise
except Exception:
# Serve with cache
if not self._stale:
self._logger.warning('KVStorage retrieve failed, serve with cache', exc_info = True)
self._stale = True
# Discard all retrieved results
update_result.clear()
# Retry update later
self._updatekeys.update(update_list)
#break
revision_min.clear()
revision_max.clear()
else:
self._stale = False
for k,v in izip(get_list, result):
# Update revision information
revision_max[k] = self._loopCount
if k not in update_result:
if k not in self._managed_objs:
# A newly retrieved key
revision_min[k] = self._loopCount
old_value = None
else:
old_value = self._managed_objs[k]
else:
old_value = update_result[k]
# Check if the value is changed
if old_value is not v and getversion(old_value) != getversion(v):
revision_min[k] = self._loopCount
new_values.add(k)
else:
if k not in revision_min:
revision_min[k] = -1
if old_value is not v:
if v is not None and hasattr(v, 'setkey'):
v.setkey(k)
if k in self._watchedkeys and k not in self._update_version:
self._update_version[k] = getversion(v)
update_result.update(zip(get_list, result))
# Disable cache for walkers with updated keys
_dirty_walkers(new_values)
# All keys which should be retrieved in next loop
new_retrieve_list = set()
# Keys which should be retrieved in next loop for a single walk
new_retrieve_keys = set()
# Keys that are used in current walk will be retrieved again in next loop
used_keys = set()
# We separate the data with revisions to prevent inconsistent result
def create_walker(orig_key, strict=True):
revision_range = [revision_min.get(orig_key, -1), revision_max.get(orig_key, -1)]
def _walk_with_revision(key):
if hasattr(key, 'getkey'):
key = key.getkey()
key = _str(key)
if key not in self._watchedkeys:
# This key is not retrieved, raise a KeyError, and record this key
new_retrieve_keys.add(key)
raise WalkKeyNotRetrieved(key)
elif self._stale:
if key not in self._managed_objs:
new_retrieve_keys.add(key)
used_keys.add(key)
return self._managed_objs.get(key)
elif key not in update_result and key not in self._managed_objs:
# This key is not retrieved, raise a KeyError, and record this key
new_retrieve_keys.add(key)
raise WalkKeyNotRetrieved(key)
# Check revision
current_revision = (
max(revision_min.get(key, -1), revision_range[0]),
min(revision_max.get(key, -1), revision_range[1])
)
if current_revision[1] < current_revision[0]:
# revisions cannot match
new_retrieve_keys.add(key)
if strict:
used_keys.add(key)
raise WalkKeyNotRetrieved(key)
else:
# update revision range
revision_range[:] = current_revision
if key in update_result:
used_keys.add(key)
return update_result[key]
else:
used_keys.add(key)
return self._managed_objs[key]
return _walk_with_revision
_default_walker_dup_check = set()
def default_walker(key, obj, walk, _circle_detect = None):
if _circle_detect is None:
_circle_detect = set()
if key in _circle_detect:
return
else:
_circle_detect.add(key)
if hasattr(obj, 'kvdb_internalref'):
rl = obj.kvdb_internalref()
for k in rl:
try:
newobj = walk(k)
except KeyError:
pass
else:
if newobj is not None:
default_walker(k, newobj, walk, _circle_detect)
def _do_default_walker(k):
if k not in _default_walker_dup_check:
_default_walker_dup_check.add(k)
_performance_counters[0] += 1
if (k, None, None) not in finished_walkers:
v = update_result.get(k)
if v is not None:
new_retrieve_keys.clear()
used_keys.clear()
default_walker(k, v, create_walker(k, False))
if new_retrieve_keys:
new_retrieve_list.update(new_retrieve_keys)
self._updatekeys.update(used_keys)
self._updatekeys.add(k)
else:
_all_used_keys = used_keys.union([k])
_update_walker_ref(k, None, None, None, _all_used_keys)
finished_walkers[(k, None, None)] = None
else:
_update_walker_ref(k, None, None, None, [k])
finished_walkers[(k, None, None)] = None
else:
_performance_counters[1] += 1
for k in orig_retrieve_list:
_do_default_walker(k)
savelist.clear()
for k,ws in walkers.items():
# k: the walker key
# ws: list of [walker_func, (request_original_keys, rid)]
# Retry every walker, starts with k, with the value of v
if k in update_result:
# The value is newly retrieved
v = update_result.get(k)
else:
# Use the stored value
v = self._managed_objs.get(k)
if ws:
for w,r in list(ws):
# w: walker_func
# r: (request_original_keys, rid)
# Custom walker
_performance_counters[2] += 1
_cache_key = (k, w, r[1])
if _cache_key in finished_walkers:
_performance_counters[3] += 1
savelist.setdefault(r[1], set()).update(finished_walkers[_cache_key])
else:
_local_save_list = set()
def save(key):
if hasattr(key, 'getkey'):
key = key.getkey()
key = _str(key)
if key != k and key not in used_keys:
raise ValueError('Cannot save a key without walk')
_local_save_list.add(key)
try:
new_retrieve_keys.clear()
used_keys.clear()
w(k, v, create_walker(k), save)
except Exception as exc:
# if one walker failed, the whole request is failed, remove all walkers
self._logger.warning("A walker raises an exception which rolls back the whole walk process. "
"walker = %r, start key = %r, new_retrieve_keys = %r, used_keys = %r",
w, k, new_retrieve_keys, used_keys, exc_info=True)
for orig_k in r[0]:
if orig_k in walkers:
walkers[orig_k][:] = [(w0, r0) for w0,r0 in walkers[orig_k] if r0[1] != r[1]]
processing_requests[:] = [r0 for r0 in processing_requests if r0[1] != r[1]]
savelist.pop(r[1])
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
savelist.setdefault(r[1], set()).update(_local_save_list)
if new_retrieve_keys:
new_retrieve_list.update(new_retrieve_keys)
self._updatekeys.update(used_keys)
self._updatekeys.add(k)
else:
_all_used_keys = used_keys.union([k])
_update_walker_ref(k, w, r[0], r[1], _all_used_keys)
finished_walkers[_cache_key] = _local_save_list
for save in savelist.values():
for k in save:
_do_default_walker(k)
retrieve_list.clear()
retrieveonce_list.clear()
retrieve_list.update(new_retrieve_list)
self._logger.debug("Loop %d: %d default walker (%d cached), %d customized walker (%d cached)",
self._loopCount,
*_performance_counters)
self._loopCount += 1
if self._stale:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
if watch_keys:
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
break
while True:
await self.apiroutine.with_callback(updateloop(), onupdate, notification_matcher)
if self._loopCount >= 100 or self._stale:
break
# If some updated result is newer than the notification version, we should wait for the notification
should_wait = False
for k,v in update_result.items():
if k in self._watchedkeys:
oldv = self._update_version.get(k)
if oldv is not None and isnewer(v, oldv):
should_wait = True
break
if should_wait:
timeout, ev, m = await self.apiroutine.wait_with_timeout(0.2, notification_matcher)
if timeout:
break
else:
onupdate(ev, m)
else:
break
# Update result
send_events = []
self._transactno += 1
transactid = '%s%016x' % (timestamp, self._transactno)
update_objs = []
for k,v in update_result.items():
if k in self._watchedkeys:
if v is None:
oldv = self._managed_objs.get(k)
if oldv is not None:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.DELETED))
else:
update_objs.append((k, None, dataobj.DataObjectUpdateEvent.DELETED))
del self._managed_objs[k]
else:
oldv = self._managed_objs.get(k)
if oldv is not None:
if oldv != v:
if oldv and hasattr(oldv, 'kvdb_update'):
oldv.kvdb_update(v)
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.UPDATED))
else:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
else:
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
for k in update_result.keys():
v = self._managed_objs.get(k)
if v is not None and hasattr(v, 'kvdb_retrievefinished'):
v.kvdb_retrievefinished(self._managed_objs)
allkeys = tuple(k for k,_,_ in update_objs)
send_events.extend((dataobj.DataObjectUpdateEvent(k, transactid, t, object = v, allkeys = allkeys) for k,v,t in update_objs))
# Process requests
unwatchall = []
for r in processing_requests:
if r[2] == 'get':
objs = [self._managed_objs.get(k) for k in r[0]]
for k,v in zip(r[0], objs):
if v is not None:
self._set_watch(k, r[3])
result = [o.create_reference() if o is not None and hasattr(o, 'create_reference') else o
for o in objs]
elif r[2] == 'watch':
result = [(v.create_reference() if hasattr(v, 'create_reference') else v)
if v is not None else dataobj.ReferenceObject(k)
for k,v in ((k,self._managed_objs.get(k)) for k in r[0])]
elif r[2] == 'walk':
saved_keys = list(savelist.get(r[1], []))
for k in saved_keys:
self._set_watch(k, r[4])
objs = [self._managed_objs.get(k) for k in saved_keys]
result = (saved_keys,
[o.create_reference() if hasattr(o, 'create_reference') else o
if o is not None else dataobj.ReferenceObject(k)
for k,o in zip(saved_keys, objs)])
elif r[2] == 'unwatchall':
# Remove watches after all results are processed
unwatchall.append(r[3])
result = None
else:
result = [copywithkey(update_result.get(k, self._managed_objs.get(k)), k) for k in r[0]]
send_events.append(RetrieveReply(r[1], result = result, stale = self._stale))
for requestid in unwatchall:
self._remove_all_watches(requestid)
async def output_result():
for e in send_events:
await self.apiroutine.wait_for_send(e)
await self.apiroutine.with_callback(output_result(), onupdate, notification_matcher)
self._pending_gc += 1
async def _gc():
# Use DFS to remove unwatched objects
mark_set = set()
def dfs(k):
if k in mark_set:
return
mark_set.add(k)
v = self._managed_objs.get(k)
if v is not None and hasattr(v, 'kvdb_internalref'):
for k2 in v.kvdb_internalref():
dfs(k2)
for k in self._watches.keys():
dfs(k)
remove_keys = self._watchedkeys.difference(mark_set)
if remove_keys:
self._watchedkeys.difference_update(remove_keys)
await self._notifier.remove_listen(*tuple(remove_keys))
for k in remove_keys:
if k in self._managed_objs:
del self._managed_objs[k]
if k in self._update_version:
del self._update_version[k]
if self._cache is not None:
self._cache.gc(self._managed_objs)
self._pending_gc = 0
while True:
if not self._updatekeys and not self._requests:
if self._pending_gc >= 10:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
elif self._pending_gc:
timeout, ev, m = await self.apiroutine.wait_with_timeout(1, notification_matcher, request_matcher)
if timeout:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
else:
ev, m = await M_(notification_matcher, request_matcher)
if m is notification_matcher:
onupdate(ev, m)
await updateinner()
async def mget(self, keys, requestid, nostale = False):
"Get multiple objects and manage them. Return references to the objects."
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'get', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
async def get(self, key, requestid, nostale = False):
"""
Get an object from specified key, and manage the object.
Return a reference to the object or None if not exists.
"""
r = await self.mget([key], requestid, nostale)
return r[0]
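    # Usage sketch (not part of the original module; the key names and request id
    # below are hypothetical): retrieving managed references from another routine.
    #
    #     requestid = ('myapp', 1)
    #     ref = await objectdb.get('myapp.someobject', requestid)
    #     refs = await objectdb.mget(['myapp.obj-a', 'myapp.obj-b'], requestid)
    #     ...
    #     await objectdb.unwatchall(requestid)    # release the managed keys when done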
async def mgetonce(self, keys, nostale = False):
"Get multiple objects, return copies of them. Referenced objects are not retrieved."
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'getonce'))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
async def getonce(self, key, nostale = False):
"Get a object without manage it. Return a copy of the object, or None if not exists. Referenced objects are not retrieved."
r = await self.mgetonce([key], nostale)
return r[0]
async def watch(self, key, requestid, nostale = False):
"""
Try to find an object and return a reference. Use ``reference.isdeleted()`` to test
whether the object exists.
        Use ``reference.wait(container)`` to wait for the object to be created.
"""
r = await self.mwatch([key], requestid, nostale)
return r[0]
async def mwatch(self, keys, requestid, nostale = False):
"Try to return all the references, see ``watch()``"
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'watch', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
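    # Usage sketch (hypothetical key and request id; not part of the original module):
    # ``watch`` always returns a reference, even when the object does not exist yet.
    #
    #     ref = await objectdb.watch('myapp.maybe-missing', requestid)
    #     if ref.isdeleted():
    #         await ref.wait(container)    # block until the object is created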
async def unwatch(self, key, requestid):
"Cancel management of a key"
await self.munwatch([key], requestid)
async def unwatchall(self, requestid):
"Cancel management for all keys that are managed by requestid"
notify = not self._requests
rid = object()
self._requests.append(((), rid, 'unwatchall', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
async def munwatch(self, keys, requestid):
"Cancel management of keys"
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'unwatch', requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
async def transact(self, keys, updater, withtime = False, maxtime = 60):
"""
        Try to update keys in a transaction, with an ``updater(keys, values)``
        which returns ``(updated_keys, updated_values)``.
        The updater may be called more than once. If ``withtime = True``,
        the updater should take three parameters:
        ``(keys, values, timestamp)``, with timestamp as the server time.
"""
keys = tuple(_str2(k) for k in keys)
updated_ref = [None, None]
extra_keys = []
extra_key_set = []
auto_remove_keys = set()
orig_len = len(keys)
def updater_with_key(keys, values, timestamp):
# Automatically manage extra keys
remove_uniquekeys = []
remove_multikeys = []
update_uniquekeys = []
update_multikeys = []
keystart = orig_len + len(auto_remove_keys)
for v in values[:keystart]:
if v is not None:
if hasattr(v, 'kvdb_uniquekeys'):
remove_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
if hasattr(v, 'kvdb_multikeys'):
remove_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
if self.debuggingupdater:
# Updater may be called more than once, ensure that this updater does not crash
# on multiple calls
kc = keys[:orig_len]
vc = [v.clone_instance() if v is not None and hasattr(v, 'clone_instance') else deepcopy(v) for v in values[:orig_len]]
if withtime:
updated_keys, updated_values = updater(kc, vc, timestamp)
else:
updated_keys, updated_values = updater(kc, vc)
if withtime:
updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len], timestamp)
else:
updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len])
for v in updated_values:
if v is not None:
if hasattr(v, 'kvdb_uniquekeys'):
update_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
if hasattr(v, 'kvdb_multikeys'):
update_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
extrakeysdict = dict(zip(keys[keystart:keystart + len(extra_keys)], values[keystart:keystart + len(extra_keys)]))
extrakeysetdict = dict(zip(keys[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)],
values[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)]))
tempdict = {}
old_values = dict(zip(keys, values))
updated_keyset = set(updated_keys)
try:
append_remove = set()
autoremove_keys = set()
# Use DFS to find auto remove keys
def dfs(k):
if k in autoremove_keys:
return
autoremove_keys.add(k)
if k not in old_values:
append_remove.add(k)
else:
oldv = old_values[k]
if oldv is not None and hasattr(oldv, 'kvdb_autoremove'):
for k2 in oldv.kvdb_autoremove():
dfs(k2)
for k,v in zip(updated_keys, updated_values):
if v is None:
dfs(k)
if append_remove:
raise _NeedMoreKeysException()
for k,v in remove_uniquekeys:
if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
# This key is not updated, keep the indices untouched
continue
if k not in extrakeysdict:
raise _NeedMoreKeysException()
elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() == v.getkey():
# If the unique key does not reference to the correct object
# there may be an error, but we ignore this.
# Save in a temporary dictionary. We may restore it later.
tempdict[k] = extrakeysdict[k]
extrakeysdict[k] = None
setkey = UniqueKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = UniqueKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().discard(WeakReferenceObject(k))
for k,v in remove_multikeys:
if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
# This key is not updated, keep the indices untouched
continue
if k not in extrakeysdict:
raise _NeedMoreKeysException()
else:
mk = extrakeysdict[k]
if mk is not None:
mk.set.dataset().discard(v)
if not mk.set.dataset():
tempdict[k] = extrakeysdict[k]
extrakeysdict[k] = None
setkey = MultiKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = MultiKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().discard(WeakReferenceObject(k))
for k,v in update_uniquekeys:
if k not in extrakeysdict:
raise _NeedMoreKeysException()
elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() != v.getkey():
raise AlreadyExistsException('Unique key conflict for %r and %r, with key %r' % \
(extrakeysdict[k].ref.getkey(), v.getkey(), k))
elif extrakeysdict[k] is None:
lv = tempdict.get(k, None)
if lv is not None and lv.ref.getkey() == v.getkey():
# Restore this value
nv = lv
else:
nv = UniqueKeyReference.create_from_key(k)
nv.ref = ReferenceObject(v.getkey())
extrakeysdict[k] = nv
setkey = UniqueKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = UniqueKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().add(nv.create_weakreference())
for k,v in update_multikeys:
if k not in extrakeysdict:
raise _NeedMoreKeysException()
else:
mk = extrakeysdict[k]
if mk is None:
mk = tempdict.get(k, None)
if mk is None:
mk = MultiKeyReference.create_from_key(k)
mk.set = DataObjectSet()
setkey = MultiKeyReference.get_keyset_from_key(k)
if setkey not in extrakeysetdict:
raise _NeedMoreKeysException()
else:
ks = extrakeysetdict[setkey]
if ks is None:
ks = MultiKeySet.create_from_key(setkey)
extrakeysetdict[setkey] = ks
ks.set.dataset().add(mk.create_weakreference())
mk.set.dataset().add(v)
extrakeysdict[k] = mk
except _NeedMoreKeysException:
# Prepare the keys
extra_keys[:] = list(set(itertools.chain((k for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(k for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(k for k,_ in update_uniquekeys),
(k for k,_ in update_multikeys))))
extra_key_set[:] = list(set(itertools.chain((UniqueKeyReference.get_keyset_from_key(k) for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(MultiKeyReference.get_keyset_from_key(k) for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
(UniqueKeyReference.get_keyset_from_key(k) for k,_ in update_uniquekeys),
(MultiKeyReference.get_keyset_from_key(k) for k,_ in update_multikeys))))
auto_remove_keys.clear()
auto_remove_keys.update(autoremove_keys.difference(keys[:orig_len])
.difference(extra_keys)
.difference(extra_key_set))
raise
else:
extrakeys_list = list(extrakeysdict.items())
extrakeyset_list = list(extrakeysetdict.items())
autoremove_list = list(autoremove_keys.difference(updated_keys)
.difference(extrakeysdict.keys())
.difference(extrakeysetdict.keys()))
return (tuple(itertools.chain(updated_keys,
(k for k,_ in extrakeys_list),
(k for k,_ in extrakeyset_list),
autoremove_list)),
tuple(itertools.chain(updated_values,
(v for _,v in extrakeys_list),
(v for _,v in extrakeyset_list),
[None] * len(autoremove_list))))
def object_updater(keys, values, timestamp):
old_version = {}
for k, v in zip(keys, values):
if v is not None and hasattr(v, 'setkey'):
v.setkey(k)
if v is not None and hasattr(v, 'kvdb_createtime'):
old_version[k] = (getattr(v, 'kvdb_createtime'), getattr(v, 'kvdb_updateversion', 1))
updated_keys, updated_values = updater_with_key(keys, values, timestamp)
updated_ref[0] = tuple(updated_keys)
new_version = []
for k,v in zip(updated_keys, updated_values):
if v is None:
new_version.append((timestamp, -1))
elif k in old_version:
ov = old_version[k]
setattr(v, 'kvdb_createtime', ov[0])
setattr(v, 'kvdb_updateversion', ov[1] + 1)
new_version.append((ov[0], ov[1] + 1))
else:
setattr(v, 'kvdb_createtime', timestamp)
setattr(v, 'kvdb_updateversion', 1)
new_version.append((timestamp, 1))
updated_ref[1] = new_version
return (updated_keys, updated_values)
start_time = self.apiroutine.scheduler.current_time
retry_times = 1
while True:
try:
await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
{'keys': keys + tuple(auto_remove_keys) + \
tuple(extra_keys) + tuple(extra_key_set),
'updater': object_updater})
except _NeedMoreKeysException:
if maxtime is not None and\
self.apiroutine.scheduler.current_time - start_time > maxtime:
raise TransactionTimeoutException
retry_times += 1
except Exception:
self._logger.debug("Transaction %r interrupted in %r retries", updater, retry_times)
raise
else:
self._logger.debug("Transaction %r done in %r retries", updater, retry_times)
break
# Short cut update notification
update_keys = self._watchedkeys.intersection(updated_ref[0])
self._updatekeys.update(update_keys)
for k,v in zip(updated_ref[0], updated_ref[1]):
k = _str(k)
if k in update_keys:
v = tuple(v)
oldv = self._update_version.get(k, (0, -1))
if oldv < v:
self._update_version[k] = v
if not self._requests:
# Fake notification
await self.apiroutine.wait_for_send(RetrieveRequestSend())
await self._notifier.publish(updated_ref[0], updated_ref[1])
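    # Illustrative updater for ``transact`` (hypothetical key and data-object class;
    # not part of the original module). The updater returns the keys and values to
    # write back; a value of None deletes the corresponding key.
    #
    #     def _updater(keys, values):
    #         counter = values[0]
    #         if counter is None:
    #             counter = CounterObject(keys[0])    # assumed data-object class
    #             counter.value = 0
    #         counter.value += 1
    #         return (keys[0],), (counter,)
    #
    #     await objectdb.transact(('myapp.counter',), _updater)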
async def gettimestamp(self):
"""
Get a timestamp from database server
"""
_timestamp = None
def _updater(keys, values, timestamp):
nonlocal _timestamp
_timestamp = timestamp
return ((), ())
await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
{'keys': (),
'updater': _updater})
return _timestamp
def watchlist(self, requestid = None):
"""
Return a dictionary whose keys are database keys, and values are lists of request ids.
Optionally filtered by request id
"""
return dict((k,list(v)) for k,v in self._watches.items() if requestid is None or requestid in v)
async def walk(self, keys, walkerdict, requestid, nostale = False):
"""
Recursively retrieve keys with customized functions.
walkerdict is a dictionary ``key->walker(key, obj, walk, save)``.
"""
keys = tuple(_str2(k) for k in keys)
notify = not self._requests
rid = object()
self._requests.append((keys, rid, 'walk', dict(walkerdict), requestid))
if notify:
await self.apiroutine.wait_for_send(RetrieveRequestSend())
ev = await RetrieveReply.createMatcher(rid)
if hasattr(ev, 'exception'):
raise ev.exception
if nostale and ev.stale:
raise StaleResultException(ev.result)
return ev.result
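    # Illustrative walker for ``walk`` (hypothetical keys and fields; not part of the
    # original module). A walker receives the start key and its value, a ``walk``
    # function to follow references, and a ``save`` function to include keys in the
    # final result.
    #
    #     def _walker(key, value, walk, save):
    #         save(key)
    #         if value is None:
    #             return
    #         for weakref in value.members.dataset():     # assumed reference-set field
    #             try:
    #                 walk(weakref.getkey())
    #             except KeyError:
    #                 pass    # not retrieved yet; the walk is retried automatically
    #             else:
    #                 save(weakref.getkey())
    #
    #     keys, refs = await objectdb.walk(['myapp.group'], {'myapp.group': _walker}, requestid)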
async def asynctransact(self, asyncupdater, withtime = False,
maxretry = None, maxtime=60):
"""
Read-Write transaction with asynchronous operations.
First, the `asyncupdater` is called with `asyncupdater(last_info, container)`.
        `last_info` is the info from the last `AsyncTransactionLockException`.
        When `asyncupdater` is called for the first time, last_info = None.
        The async updater should be an async function, and return
        `(updater, keys)`. The `updater` should
        be a valid updater function as used in the `transact` API. `keys` will
        be the keys used in the transaction.
The async updater can return None to terminate the transaction
without exception.
After the call, a transaction is automatically started with the
return values of `asyncupdater`.
`updater` can raise `AsyncTransactionLockException` to restart
the transaction from `asyncupdater`.
:param asyncupdater: An async updater `asyncupdater(last_info, container)`
which returns `(updater, keys)`
:param withtime: Whether the returned updater need a timestamp
:param maxretry: Limit the max retried times
:param maxtime: Limit the execution time. The transaction is abandoned
if still not completed after `maxtime` seconds.
"""
start_time = self.apiroutine.scheduler.current_time
def timeleft():
if maxtime is None:
return None
else:
time_left = maxtime + start_time - \
self.apiroutine.scheduler.current_time
if time_left <= 0:
raise TransactionTimeoutException
else:
return time_left
retry_times = 0
last_info = None
while True:
timeout, r = \
await self.apiroutine.execute_with_timeout(
timeleft(),
asyncupdater(last_info, self.apiroutine)
)
if timeout:
raise TransactionTimeoutException
if r is None:
return
updater, keys = r
try:
await self.transact(keys, updater, withtime, timeleft())
except AsyncTransactionLockException as e:
retry_times += 1
if maxretry is not None and retry_times > maxretry:
raise TransactionRetryExceededException
# Check time left
timeleft()
last_info = e.info
except Exception:
self._logger.debug("Async transaction %r interrupted in %r retries", asyncupdater, retry_times + 1)
raise
else:
self._logger.debug("Async transaction %r done in %r retries", asyncupdater, retry_times + 1)
break
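    # Illustrative async updater factory for ``asynctransact`` (hypothetical helper
    # and keys; not part of the original module). ``last_info`` carries the info from
    # the previous AsyncTransactionLockException, so the keys to lock can be recomputed.
    #
    #     async def _asyncupdater(last_info, container):
    #         keys = await compute_keys(last_info)    # assumed helper coroutine
    #         def _updater(keys, values):
    #             # modify values as needed
    #             return keys, values
    #         return (_updater, keys)
    #
    #     await objectdb.asynctransact(_asyncupdater)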
async def writewalk(self, keys, walker, withtime = False, maxtime = 60):
"""
A read-write transaction with walkers
:param keys: initial keys used in walk. Provide keys already known to
be necessary to optimize the transaction.
:param walker: A walker should be `walker(walk, write)`,
where `walk` is a function `walk(key)->value`
to get a value from the database, and
`write` is a function `write(key, value)`
                       to save a value to the database.
                       A value can be written to the database any number of times.
A `walk` called after `write` is guaranteed
to retrieve the previously written value.
:param withtime: if withtime=True, an extra timestamp parameter is given to
walkers, so walker should be
`walker(walk, write, timestamp)`
:param maxtime: max execution time of this transaction
"""
@functools.wraps(walker)
async def _asyncwalker(last_info, container):
return (keys, walker)
return await self.asyncwritewalk(_asyncwalker, withtime, maxtime)
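    # Illustrative read-write walker for ``writewalk`` (hypothetical keys and helper;
    # not part of the original module). ``walk`` raises WalkKeyNotRetrieved (a KeyError)
    # for keys that have not been fetched yet; returning lets the transaction retry
    # with those keys included.
    #
    #     def _walker(walk, write):
    #         try:
    #             groupmap = walk('myapp.groupmap')
    #         except KeyError:
    #             return
    #         newgroup = GroupObject.create_instance('group-1')    # assumed helper
    #         groupmap.groups.dataset().add(newgroup.create_weakreference())
    #         write('myapp.groupmap', groupmap)
    #         write(newgroup.getkey(), newgroup)
    #
    #     await objectdb.writewalk(('myapp.groupmap',), _walker)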
async def asyncwritewalk(self, asyncwalker, withtime = False, maxtime = 60):
"""
A read-write transaction with walker factory
:param asyncwalker: an async function called as `asyncwalker(last_info, container)`
and returns (keys, walker), which
are the same as parameters of `writewalk`
:param keys: initial keys used in walk
:param walker: A walker should be `walker(walk, write)`,
where `walk` is a function `walk(key)->value`
to get a value from the database, and
`write` is a function `write(key, value)`
                       to save a value to the database.
                       A value can be written to the database any number of times.
A `walk` called after `write` is guaranteed
to retrieve the previously written value.
raise AsyncTransactionLockException in walkers
to restart the transaction
:param withtime: if withtime=True, an extra timestamp parameter is given to
walkers, so walkers should be
                         `walker(walk, write, timestamp)`
:param maxtime: max execution time of this transaction
"""
@functools.wraps(asyncwalker)
async def _asyncupdater(last_info, container):
if last_info is not None:
from_walker, real_info = last_info
if not from_walker:
keys, orig_keys, walker = real_info
else:
r = await asyncwalker(real_info, container)
if r is None:
return None
keys, walker = r
orig_keys = keys
else:
r = await asyncwalker(None, container)
if r is None:
return None
keys, walker = r
orig_keys = keys
@functools.wraps(walker)
def _updater(keys, values, timestamp):
_stored_objs = dict(zip(keys, values))
if self.debuggingupdater:
_stored_old_values = {k: v.jsonencode()
for k,v in zip(keys, values)
if hasattr(v, 'jsonencode')}
# Keys written by walkers
_walker_write_dict = {}
_lost_keys = set()
_used_keys = set()
def _walk(key):
if key not in _stored_objs:
_lost_keys.add(key)
raise WalkKeyNotRetrieved(key)
else:
if key not in _walker_write_dict:
_used_keys.add(key)
return _stored_objs[key]
def _write(key, value):
_walker_write_dict[key] = value
_stored_objs[key] = value
try:
if withtime:
walker(_walk, _write, timestamp)
else:
walker(_walk, _write)
except AsyncTransactionLockException as e:
raise AsyncTransactionLockException((True, e.info))
if _lost_keys:
_lost_keys.update(_used_keys)
_lost_keys.update(orig_keys)
raise AsyncTransactionLockException((False, (_lost_keys, orig_keys, walker)))
if self.debuggingupdater:
# Check if there are changes not written
for k, v in _stored_old_values.items():
if k not in _walker_write_dict:
v2 = _stored_objs[k]
assert hasattr(v2, 'jsonencode') and v2.jsonencode() == v
if _walker_write_dict:
return tuple(zip(*_walker_write_dict.items()))
else:
return (), ()
return (_updater, keys)
return await self.asynctransact(_asyncupdater, True, maxtime=maxtime)
| apache-2.0 | -3,423,474,057,507,028,500 | 50.19209 | 198 | 0.461097 | false |
hhursev/recipe-scraper | recipe_scrapers/cucchiaio.py | 1 | 1059 | from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields
class Cucchiaio(AbstractScraper):
@classmethod
def host(cls):
return "cucchiaio.it"
def author(self):
return self.schema.author()
def title(self):
return self.schema.title()
def total_time(self):
block = self.soup.find("div", {"class": "scheda-ricetta-new"})
if block:
return sum(map(get_minutes, block.findAll("tr")))
return 0
def yields(self):
header = self.soup.find("td", text="PORZIONI")
if header:
value = header.find_next("td")
return get_yields(value)
return None
def image(self):
data = self.soup.find("div", {"class": "auto"}).find("img", {"class": "image"})
if data:
data = data.get("src")
return data
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return None
| mit | 4,176,352,788,989,998,000 | 24.214286 | 87 | 0.581681 | false |
googleapis/googleapis-gen | google/cloud/dialogflow/v2/dialogflow-v2-py/google/cloud/dialogflow_v2/services/conversations/async_client.py | 1 | 26721 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.services.conversations import pagers
from google.cloud.dialogflow_v2.types import conversation
from google.cloud.dialogflow_v2.types import conversation as gcd_conversation
from google.cloud.dialogflow_v2.types import participant
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ConversationsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ConversationsGrpcAsyncIOTransport
from .client import ConversationsClient
class ConversationsAsyncClient:
"""Service for managing
[Conversations][google.cloud.dialogflow.v2.Conversation].
"""
_client: ConversationsClient
DEFAULT_ENDPOINT = ConversationsClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ConversationsClient.DEFAULT_MTLS_ENDPOINT
conversation_path = staticmethod(ConversationsClient.conversation_path)
parse_conversation_path = staticmethod(ConversationsClient.parse_conversation_path)
conversation_profile_path = staticmethod(ConversationsClient.conversation_profile_path)
parse_conversation_profile_path = staticmethod(ConversationsClient.parse_conversation_profile_path)
message_path = staticmethod(ConversationsClient.message_path)
parse_message_path = staticmethod(ConversationsClient.parse_message_path)
common_billing_account_path = staticmethod(ConversationsClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ConversationsClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ConversationsClient.common_folder_path)
parse_common_folder_path = staticmethod(ConversationsClient.parse_common_folder_path)
common_organization_path = staticmethod(ConversationsClient.common_organization_path)
parse_common_organization_path = staticmethod(ConversationsClient.parse_common_organization_path)
common_project_path = staticmethod(ConversationsClient.common_project_path)
parse_common_project_path = staticmethod(ConversationsClient.parse_common_project_path)
common_location_path = staticmethod(ConversationsClient.common_location_path)
parse_common_location_path = staticmethod(ConversationsClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationsAsyncClient: The constructed client.
"""
return ConversationsClient.from_service_account_info.__func__(ConversationsAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversationsAsyncClient: The constructed client.
"""
return ConversationsClient.from_service_account_file.__func__(ConversationsAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversationsTransport:
"""Returns the transport used by the client instance.
Returns:
ConversationsTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ConversationsClient).get_transport_class, type(ConversationsClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ConversationsTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the conversations client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ConversationsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ConversationsClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
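    # Usage sketch (illustrative; the endpoint override is optional and the value
    # shown is only an example):
    #
    #     client = ConversationsAsyncClient(
    #         client_options={"api_endpoint": "dialogflow.googleapis.com"},
    #     )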
async def create_conversation(self,
request: gcd_conversation.CreateConversationRequest = None,
*,
parent: str = None,
conversation: gcd_conversation.Conversation = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_conversation.Conversation:
r"""Creates a new conversation. Conversations are auto-completed
after 24 hours.
Conversation Lifecycle: There are two stages during a
conversation: Automated Agent Stage and Assist Stage.
For Automated Agent Stage, there will be a dialogflow agent
responding to user queries.
For Assist Stage, there's no dialogflow agent responding to user
queries. But we will provide suggestions which are generated
from conversation.
If
[Conversation.conversation_profile][google.cloud.dialogflow.v2.Conversation.conversation_profile]
is configured for a dialogflow agent, conversation will start
from ``Automated Agent Stage``, otherwise, it will start from
``Assist Stage``. And during ``Automated Agent Stage``, once an
[Intent][google.cloud.dialogflow.v2.Intent] with
[Intent.live_agent_handoff][google.cloud.dialogflow.v2.Intent.live_agent_handoff]
is triggered, conversation will transfer to Assist Stage.
Args:
request (:class:`google.cloud.dialogflow_v2.types.CreateConversationRequest`):
The request object. The request message for
[Conversations.CreateConversation][google.cloud.dialogflow.v2.Conversations.CreateConversation].
parent (:class:`str`):
Required. Resource identifier of the project creating
the conversation. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversation (:class:`google.cloud.dialogflow_v2.types.Conversation`):
Required. The conversation to create.
This corresponds to the ``conversation`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, conversation])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = gcd_conversation.CreateConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if conversation is not None:
request.conversation = conversation
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
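    # Usage sketch (illustrative; the project and conversation-profile names are
    # placeholders):
    #
    #     conversation = await client.create_conversation(
    #         parent="projects/my-project/locations/global",
    #         conversation=gcd_conversation.Conversation(
    #             conversation_profile="projects/my-project/locations/global/conversationProfiles/my-profile",
    #         ),
    #     )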
async def list_conversations(self,
request: conversation.ListConversationsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConversationsAsyncPager:
r"""Returns the list of all conversations in the
specified project.
Args:
request (:class:`google.cloud.dialogflow_v2.types.ListConversationsRequest`):
The request object. The request message for
[Conversations.ListConversations][google.cloud.dialogflow.v2.Conversations.ListConversations].
parent (:class:`str`):
Required. The project from which to list all
conversation. Format:
``projects/<Project ID>/locations/<Location ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.conversations.pagers.ListConversationsAsyncPager:
The response message for
[Conversations.ListConversations][google.cloud.dialogflow.v2.Conversations.ListConversations].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.ListConversationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_conversations,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListConversationsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_conversation(self,
request: conversation.GetConversationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversation.Conversation:
r"""Retrieves the specific conversation.
Args:
request (:class:`google.cloud.dialogflow_v2.types.GetConversationRequest`):
The request object. The request message for
[Conversations.GetConversation][google.cloud.dialogflow.v2.Conversations.GetConversation].
name (:class:`str`):
Required. The name of the conversation. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.GetConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def complete_conversation(self,
request: conversation.CompleteConversationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversation.Conversation:
r"""Completes the specified conversation. Finished
conversations are purged from the database after 30
days.
Args:
request (:class:`google.cloud.dialogflow_v2.types.CompleteConversationRequest`):
The request object. The request message for
[Conversations.CompleteConversation][google.cloud.dialogflow.v2.Conversations.CompleteConversation].
name (:class:`str`):
Required. Resource identifier of the conversation to
close. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.types.Conversation:
Represents a conversation.
A conversation is an interaction between
an agent, including live agents and
Dialogflow agents, and a support
customer. Conversations can include
phone calls and text-based chat
sessions.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.CompleteConversationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.complete_conversation,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_messages(self,
request: conversation.ListMessagesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListMessagesAsyncPager:
r"""Lists messages that belong to a given conversation. ``messages``
are ordered by ``create_time`` in descending order. To fetch
updates without duplication, send request with filter
``create_time_epoch_microseconds > [first item's create_time of previous request]``
and empty page_token.
Args:
request (:class:`google.cloud.dialogflow_v2.types.ListMessagesRequest`):
The request object. The request message for
[Conversations.ListMessages][google.cloud.dialogflow.v2.Conversations.ListMessages].
parent (:class:`str`):
Required. The name of the conversation to list messages
for. Format:
``projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflow_v2.services.conversations.pagers.ListMessagesAsyncPager:
The response message for
[Conversations.ListMessages][google.cloud.dialogflow.v2.Conversations.ListMessages].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = conversation.ListMessagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_messages,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListMessagesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
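    # Usage sketch (illustrative; the conversation name is a placeholder). The returned
    # pager resolves additional pages automatically during async iteration:
    #
    #     pager = await client.list_messages(
    #         parent="projects/my-project/locations/global/conversations/my-conversation")
    #     async for message in pager:
    #         ...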
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ConversationsAsyncClient",
)
| apache-2.0 | -8,202,350,069,784,333,000 | 41.7536 | 138 | 0.6258 | false |