prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k)
---|---|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Atsushi Miyake
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or http://apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to those terms.
mod enigma;
mod substitution_table;
mod router;<|fim▁hole|>
pub use self::enigma::Enigma;
pub use self::router::Router;
pub use self::router::RouterProtocol;
pub use self::router::Digit;
pub use self::router_manager::RouterManager;
pub use self::reflector::Reflector;
pub use self::substitution_table::SubstitutionTable;
pub use self::plugboard::Plugboard;
pub use self::alphabets::ALPHABETS;
pub use self::alphabets::SUBSTITUTION_TABLE1;
pub use self::alphabets::SUBSTITUTION_TABLE2;
pub use self::alphabets::SUBSTITUTION_TABLE3;
pub use self::alphabets::REFLECTOR;
pub use self::alphabets::PLUGBOARD;<|fim▁end|> | mod router_manager;
mod reflector;
mod plugboard;
mod alphabets; |
<|file_name|>recipe-137551.py<|end_file_name|><|fim▁begin|>RegObj.dll is an ActiveX server (and hence has an automation interface) that is available, with documentation, in
the distribution file known as RegObji.exe, from the following page:
http://msdn.microsoft.com/vbasic/downloads/addins.asp
To provide early binding for RegObj, use:
>>> from win32com.client import gencache
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
or the MakePy utility within PythonWin, referring to "Regstration Manipulation Classes (1.0)" (Please notice
the spelling error.)
Sample use, to determine what command is associated with a Python file:
>>> from win32com.client import Dispatch, gencache
>>> from win32con import HKEY_CLASSES_ROOT
>>> gencache.EnsureModule('{DE10C540-810E-11CF-BBE7-444553540000}', 0, 1, 0)
>>> regobj = Dispatch ( 'RegObj.Registry' )
<|fim▁hole|>u'J:\\Python22\\pythonw.exe "%1" %*'<|fim▁end|> | >>> HKCR = regobj.RegKeyFromHKey ( HKEY_CLASSES_ROOT )
>>> PythonFileKey = HKCR.ParseKeyName('Python.File\Shell\Open\command')
>>> PythonFileKey.Value |
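For comparison, the same registry lookup can be done with the standard library alone, with no COM involved. A minimal sketch using the winreg module (named _winreg on Python 2); the value shown is only an example and will differ per installation:
>>> import winreg
>>> winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, r'Python.File\shell\open\command')
'"C:\\Python312\\pythonw.exe" "%1" %*'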
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Support for Hangouts."""
import logging
from hangups.auth import GoogleAuthError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.conversation.util import create_matcher
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers import dispatcher, intent
import homeassistant.helpers.config_validation as cv
# We need an import from .config_flow; without it, .config_flow is never loaded.
from .config_flow import HangoutsFlowHandler # noqa: F401
from .const import (
CONF_BOT,
CONF_DEFAULT_CONVERSATIONS,
CONF_ERROR_SUPPRESSED_CONVERSATIONS,
CONF_INTENTS,
CONF_MATCHERS,
CONF_REFRESH_TOKEN,
CONF_SENTENCES,
DOMAIN,
EVENT_HANGOUTS_CONNECTED,
EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
INTENT_HELP,
INTENT_SCHEMA,
MESSAGE_SCHEMA,
SERVICE_RECONNECT,
SERVICE_SEND_MESSAGE,
SERVICE_UPDATE,
TARGETS_SCHEMA,
)
from .hangouts_bot import HangoutsBot
from .intents import HelpIntent
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_INTENTS, default={}): vol.Schema(
{cv.string: INTENT_SCHEMA}
),
vol.Optional(CONF_DEFAULT_CONVERSATIONS, default=[]): [TARGETS_SCHEMA],<|fim▁hole|> vol.Optional(CONF_ERROR_SUPPRESSED_CONVERSATIONS, default=[]): [
TARGETS_SCHEMA
],
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Hangouts bot component."""
if (config := config.get(DOMAIN)) is None:
hass.data[DOMAIN] = {
CONF_INTENTS: {},
CONF_DEFAULT_CONVERSATIONS: [],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: [],
}
return True
hass.data[DOMAIN] = {
CONF_INTENTS: config[CONF_INTENTS],
CONF_DEFAULT_CONVERSATIONS: config[CONF_DEFAULT_CONVERSATIONS],
CONF_ERROR_SUPPRESSED_CONVERSATIONS: config[
CONF_ERROR_SUPPRESSED_CONVERSATIONS
],
}
if (
hass.data[DOMAIN][CONF_INTENTS]
and INTENT_HELP not in hass.data[DOMAIN][CONF_INTENTS]
):
hass.data[DOMAIN][CONF_INTENTS][INTENT_HELP] = {CONF_SENTENCES: ["HELP"]}
for data in hass.data[DOMAIN][CONF_INTENTS].values():
matchers = []
for sentence in data[CONF_SENTENCES]:
matchers.append(create_matcher(sentence))
data[CONF_MATCHERS] = matchers
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistant, config: ConfigEntry) -> bool:
"""Set up a config entry."""
try:
bot = HangoutsBot(
hass,
config.data.get(CONF_REFRESH_TOKEN),
hass.data[DOMAIN][CONF_INTENTS],
hass.data[DOMAIN][CONF_DEFAULT_CONVERSATIONS],
hass.data[DOMAIN][CONF_ERROR_SUPPRESSED_CONVERSATIONS],
)
hass.data[DOMAIN][CONF_BOT] = bot
except GoogleAuthError as exception:
_LOGGER.error("Hangouts failed to log in: %s", str(exception))
return False
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONNECTED, bot.async_handle_update_users_and_conversations
)
dispatcher.async_dispatcher_connect(
hass, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, bot.async_resolve_conversations
)
dispatcher.async_dispatcher_connect(
hass,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
bot.async_update_conversation_commands,
)
config.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, bot.async_handle_hass_stop)
)
await bot.async_connect()
hass.services.async_register(
DOMAIN,
SERVICE_SEND_MESSAGE,
bot.async_handle_send_message,
schema=MESSAGE_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_UPDATE,
bot.async_handle_update_users_and_conversations,
schema=vol.Schema({}),
)
hass.services.async_register(
DOMAIN, SERVICE_RECONNECT, bot.async_handle_reconnect, schema=vol.Schema({})
)
intent.async_register(hass, HelpIntent(hass))
return True
async def async_unload_entry(hass: HomeAssistant, _: ConfigEntry) -> bool:
"""Unload a config entry."""
bot = hass.data[DOMAIN].pop(CONF_BOT)
await bot.async_disconnect()
return True<|fim▁end|> | |
<|file_name|>animation.py<|end_file_name|><|fim▁begin|>"""Animation.
An animation is a set of keyframes.
The value of the selected attribute changes over time.
Keyframe:
(time, value)
Objects have an animation manager which manages the animation graph and switching."""
from operator import itemgetter
from eaf import Timer
from xoinvader.utils import Point
class AnimationBoundariesExceeded(Exception):
"""Exception to show that interpolated value will be incorrect."""
def __init__(self, first, current_time, second):
super(AnimationBoundariesExceeded, self).__init__(
self,
f"Animation frame boundaries exceeded: {first} <= {current_time} <= {second}",
)
class InterpolationUnknownTypes(Exception):
"""Such type combination is unsupported."""
def __init__(self, first, second):
super(InterpolationUnknownTypes, self).__init__(
self, f"Unknown types of interpolating values: {first} and {second}"
)
# TODO: Implement animation graph and etc
class AnimationManager(object):
"""Manage list of object animation."""
def __init__(self):
self._animations = {}
self._animation = None
@property
def animation(self):
"""AnimationManager's current animation name.
To set animation - assign it's name.
:getter: yes
:setter: yes
:type: str
"""
if self._animation:
return self._animation.name
else:
raise AttributeError("There is no available animation.")
@animation.setter
def animation(self, name):
if name in self._animations:
self._animation = self._animations[name]
else:
raise ValueError(f"No such animation: '{name}'.")
def add(self, name, *args, **kwargs):
"""Add new animation, pass args to Animation class.
See interface of `class::xoinvader.animation.Animation`.
:param str name: animation name
"""
animation = Animation(name, *args, **kwargs)
self._animations[name] = animation
if not self._animation:
self._animation = animation
def update(self, dt):
"""Update manager's state."""
if not self._animation:
return
try:
self._animation.update(dt)
except StopIteration:
return # TODO: think about method to change animation
# pylint: disable=too-many-instance-attributes,too-many-arguments
# pylint: disable=too-few-public-methods
class Animation(object):
"""Animation unit.
    An Animation object holds a sorted list of (time, value) items and changes
    the selected attribute of the bound object according to the local animation time.
    Time is measured by a timer. When the current time is greater than or equal to the
    time of the next keyframe, the animation object changes the attribute to the appropriate value.
    When the animation is done and not looped, StopIteration is raised.
    For interpolated animations, the value is calculated between the two
    bounding frames and on each frame switch.
:param str name: animation name
:param object bind: object to bind animation
:param str attr: attribute to change in frames
:param list keyframes: (float, object) tuples
:param bool interp: interpolate values between frames or not
:param bool loop: loop animation or not
"""
def __init__(self, name, bind, attr, keyframes, interp=False, loop=False):
self._name = name
self._obj = bind
self._attr = attr
if not keyframes:
raise ValueError("Animation keyframes must not be empty.")
self._keyframes = sorted(keyframes, key=itemgetter(0))
self._interp = interp
self._loop = loop
# Timer for tracking local time
self._timer = Timer(self._keyframes[-1][0], lambda: True)
self._timer.start()
# Current keyframe index
self._current = 0
if self._interp:
self.update = self._update_interpolated
else:
self.update = self._update_discrete
@property
def name(self):
"""Animation's name.
:getter: yes
:setter: no
:type: str
"""
return self._name
def _apply_value(self, value):
"""Apply new value to linked object.
:param obj value: value to apply
"""
setattr(self._obj, self._attr, value)
def _update_interpolated(self, dt):
"""Advance animation and interpolate value.
        NOTE: animation frame switching depends on the interp mode.
        An animation with interpolation switches frames only when the current local
        time exceeds the NEXT frame's time boundary.
"""
self._check_animation_state()
self._timer.update(dt)
current_time = self._timer.elapsed
keyframe = self._keyframes[self._current]
next_keyframe = self._keyframes[self._current + 1]
# it's time to switch keyframe
if current_time >= next_keyframe[0]:
self._current += 1
keyframe = self._keyframes[self._current]
if self._current == len(self._keyframes) - 1:
self._apply_value(keyframe[1])
self._current += 1
self._check_animation_state()
return
next_keyframe = self._keyframes[self._current + 1]
value = interpolate(keyframe, next_keyframe, current_time)
self._apply_value(value)
def _update_discrete(self, dt):
"""Advance animation without interpolating value.
        NOTE: animation frame switching depends on the interp mode.
        A discrete animation switches frames and updates the value only if
        the current local time is >= the time of the current keyframe.
        There is no need to calculate values between frames, and thus
        no need to complicate the behaviour.
"""
self._check_animation_state()
self._timer.update(dt)
keyframe = self._keyframes[self._current]
# Check if animation need to switch keyframe
if self._timer.elapsed >= keyframe[0]:
self._apply_value(keyframe[1])
self._current += 1
def _check_animation_state(self):
"""Check animation state and restart if needed.
:raise StopIteration: when animation exceeded frames.
"""
if len(self._keyframes) == self._current:
if self._loop:
self._current = 0
self._timer.restart()
else:
self._timer.stop()
raise StopIteration
def linear_equation(val1, val2, time1, time2, current_time):
"""Linear equation to get interpolated value.
:param float val1: first keyframe value
:param float val2: second keyframe value
:param float time1: first keyframe local time
:param float time2: second keyframe local time
:param float current_time: current animation local time
"""
return val1 + (val2 - val1) / (time2 - time1) * (current_time - time1)
def same_type(values, types):
"""Check if values are belongs to same type or type tuple.
:param collections.Iterable values: values to check type similarity
:param tuple|type types: type or tuple of types
"""
return all(map(lambda it: isinstance(it, types), values))
def interpolate(first, second, current_time):
"""Interpolate value by two bounding keyframes.
:param collections.Iterable first: first bounding keyframe
:param collections.Iterable second: second bounding keyframe
:param float current_time: current animation local time
:raises AnimationBoundariesExceeded: when time interval is invalid
:raises InterpolationUnknownTypes: when interpolating invalid types
"""
if not first[0] <= current_time <= second[0]:
raise AnimationBoundariesExceeded(first[0], current_time, second[0])
def frames_of(*args):
"""If frames both of specified type."""
return same_type((first[1], second[1]), args)
if frames_of(int, float):
value = linear_equation(
float(first[1]),
float(second[1]),
float(first[0]),<|fim▁hole|> elif frames_of(Point):
value = linear_equation(
first[1],
second[1],
float(first[0]),
float(second[0]),
float(current_time),
)
else:
raise InterpolationUnknownTypes(type(first[1]), type(second[1]))
return value<|fim▁end|> | float(second[0]),
float(current_time),
)
|
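A minimal usage sketch of the animation API above (untested; it assumes the xoinvader package and its eaf dependency are importable, and the Sprite class is a stand-in object invented for the example):

from xoinvader.animation import AnimationManager


class Sprite(object):
    """Hypothetical object whose 'alpha' attribute gets animated."""

    def __init__(self):
        self.alpha = 0.0


sprite = Sprite()
manager = AnimationManager()
# Fade alpha from 0.0 to 1.0 over one second, interpolating between the keyframes.
manager.add("fade_in", bind=sprite, attr="alpha",
            keyframes=[(0.0, 0.0), (1.0, 1.0)], interp=True)

for _ in range(10):
    manager.update(0.1)  # advance the local animation time by 0.1s
print(sprite.alpha)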
<|file_name|>postgres.js<|end_file_name|><|fim▁begin|>var _ = require('lodash')
var assert = require('assert')
var common = require('./common')
module.exports = function() {
var adapter = {
wrap: column => `"${column}"`
}
adapter.createTimestamps = function(data, options) {
options = options || {}
var table = this.wrap(data.identity.name)
var schema = options.schema || 'public'
return this.db
.query(
'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE ' +
"TABLE_NAME='" +
data.identity.name +
"' AND COLUMN_NAME='updated_at' AND " +
"TABLE_CATALOG=current_database() AND TABLE_SCHEMA='" +
schema +
"'",
null,
options
)
.then(recordset => {
if (recordset.length === 0) {
return this.db.execute(
`ALTER TABLE ${table} ADD ${this.wrap(
'updated_at'
)} TIMESTAMP(3) WITHOUT TIME ZONE`,<|fim▁hole|> null,
options
)
}
})
}
adapter.buildInsertCommand = function(data) {
data.insertCommand = `INSERT INTO ${this.wrap(
data.identity.name
)} (<fields>${data.timestamps ? `,"updated_at"` : ''}) VALUES (<values>${
data.timestamps ? `,(now() at time zone 'utc')` : ''
}) RETURNING *`
}
adapter.buildUpdateCommand = function(data) {
data.updateCommand = `UPDATE ${this.wrap(
data.identity.name
)} SET <fields-values>${
data.timestamps ? `,"updated_at"=(now() at time zone 'utc')` : ''
} WHERE <primary-keys> RETURNING *`
}
adapter.buildDeleteCommand = function(data) {
data.deleteCommand =
'DELETE FROM ' +
this.wrap(data.identity.name) +
' WHERE <find-keys> RETURNING *'
}
adapter.create = common.create
adapter.update = common.update
adapter.destroy = common.destroy
adapter.extractRecordset = function(jsonset, coerce) {
assert(_.isArray(jsonset), 'jsonset is not an array')
_.forEach(jsonset, function(record) {
coerce.map(function(coercion) {
const value = record[coercion.property]
if (value === void 0) {
record[coercion.property] = null
} else if (value !== null) {
record[coercion.property] = coercion.fn(record[coercion.property])
}
})
})
return jsonset
}
adapter.buildQuery = function buildQuery(data, options, isAssociation) {
var fields = []
_.forEach(
data.properties,
function(property, name) {
var fieldName = property.field || name
var alias = name
fields.push(
this.wrap(fieldName) +
(alias !== fieldName ? ' AS ' + this.wrap(alias) : '')
)
if (
options.fetchExternalDescription &&
property.display &&
property.schema &&
property.schema.$ref &&
property.schema.key
) {
let display = property.display
const point = display.indexOf('.')
if (point > -1) {
display = display.substr(point + 1)
}
fields.push(
`(select "${display}" FROM "${property.schema.$ref}" where "${
property.schema.key
}"="${data.key}"."${fieldName}") as "${_.camelCase(
`${data.identity.name} ${name} ${display}`
)}"`
)
}
}.bind(this)
)
if (data.timestamps) {
fields.push(this.wrap('updated_at'))
}
_.forEach(
data.associations,
function(association) {
if (!association.data.foreignKey) {
return false
}
const query = this.buildQuery(association.data, options, true)
var foreignKey =
association.data.properties[association.data.foreignKey].field ||
association.data.foreignKey
fields.push(
'(select array_to_json(array_agg(row_to_json(t))) from (' +
query +
' WHERE ' +
this.wrap(foreignKey) +
'=' +
this.wrap(data.key) +
'.' +
this.wrap(data.primaryKeyFields[0]) +
' ORDER BY ' +
(
association.data.primaryOrderFields ||
association.data.primaryKeyFields
)
.map(this.wrap.bind(this))
.join() +
') t) AS ' +
this.wrap(association.data.key)
)
}.bind(this)
)
let fetchCommand =
'SELECT ' +
fields.join(',') +
' FROM ' +
this.wrap(data.identity.name) +
' AS ' +
this.wrap(data.key)
if (options.schema && !isAssociation) {
fetchCommand = fetchCommand.replace(
/" FROM "/g,
`" FROM ${options.schema}."`
)
}
return fetchCommand
}
adapter.getCoercionFunction = function(type, timezone) {
switch (type) {
case 'datetime':
return function(value) {
if (timezone === 'ignore') {
var d = new Date(value + 'Z')
return new Date(d.getTime() + d.getTimezoneOffset() * 60000)
} else {
return new Date(value)
}
}
default:
return function(value) {
return value
}
}
}
return adapter
}<|fim▁end|> | |
<|file_name|>iconListInfo.js<|end_file_name|><|fim▁begin|>// JSON Object of all of the icons and their tags
export default {
apple : {
name : 'apple',
color: '#be0000',
image : 'apple67.svg',
tags: ['apple', 'fruit', 'food'],
categories: ['food', 'supermarket']
},
bread : {
name : 'bread',
color: '#c26b24',
image : 'bread14.svg',
tags: ['bread', 'food', 'wheat', 'bake'],
categories: ['food', 'supermarket']
},
broccoli : {
name : 'broccoli',
color: '#16a900',
image : 'broccoli.svg',
tags: ['broccoli', 'food', 'vegetable'],
categories: ['food', 'supermarket']
},
cheese : {
name : 'cheese',
color: '#ffe625',
image : 'cheese7.svg',
tags: ['cheese', 'food', 'dairy'],
categories: ['food', 'supermarket']
},<|fim▁hole|> name : 'shopping',
color: '#f32393',
image : 'shopping225.svg',
tags: ['shopping', 'bag'],
categories: ['shopping', 'supermarket']
},
cart : {
name : 'cart',
color: '#9ba990',
image : 'supermarket1.svg',
tags: ['cart', 'shopping'],
categories: ['shopping', 'supermarket']
},
fish : {
name : 'fish',
color: '#6d7ca9',
image : 'fish52.svg',
tags: ['fish', 'food'],
categories: ['fish', 'supermarket']
},
giftbox : {
name : 'giftbox',
color: '#f32393',
image : 'giftbox56.svg',
tags: ['giftbox', 'gift', 'present'],
categories: ['gift', 'shopping', 'supermarket']
}
};<|fim▁end|> | shopping : { |
<|file_name|>test_metadata.py<|end_file_name|><|fim▁begin|>import os
from shutil import copyfile
from photomanip.metadata import ImageExif, SetExifTool
from nose import tools
ORIGINAL_IMAGE_FILENAME = 'photomanip/tests/turd_ferguson.jpeg'
TEST_IMAGE_FILENAME = 'photomanip/tests/image_exif_test.jpg'
ORIGINAL_PHOTO_FILENAME = 'photomanip/tests/test_photo_0.jpg'
TEST_PHOTO_01_FILENAME = 'photomanip/tests/image_exposure_test_01.jpg'
TEST_PHOTO_02_FILENAME = 'photomanip/tests/image_exposure_test_02.jpg'
class TestImageExif:
@classmethod
def setup_class(cls):
cls.image_exif = ImageExif()
copyfile(ORIGINAL_IMAGE_FILENAME, TEST_IMAGE_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_01_FILENAME)
copyfile(ORIGINAL_PHOTO_FILENAME, TEST_PHOTO_02_FILENAME)
@classmethod
def teardown_class(cls):
os.remove(TEST_IMAGE_FILENAME)
os.remove(TEST_PHOTO_01_FILENAME)
os.remove(TEST_PHOTO_02_FILENAME)
def get_stored_tags(self, tag_list, filename):<|fim▁hole|> stored_tags = et.get_tags(tag_list, filename)
return stored_tags
def test_imageexif_generate_tag_list(self):
get_list = self.image_exif.get_list
# test get list
tag_list = self.image_exif._generate_tag_list(get_list)
tools.eq_(set(tag_list), set([
'EXIF:DateTimeOriginal',
'File:ImageHeight',
'IPTC:Keywords',
'EXIF:ExposureTime',
'File:ImageWidth']))
# test set list
tag_list = self.image_exif._generate_tag_list(get_list, True)
tools.eq_(tag_list, {
'date_created': 'EXIF:DateTimeOriginal={}',
'exposure_time': 'EXIF:ExposureTime={}',
'image_height': 'File:ImageHeight={}',
'image_width': 'File:ImageWidth={}',
'keywords': 'IPTC:Keywords={}'})
def test_set_image_metadata(self):
output_meta = {
"name": "Terd Ferguson",
"keywords": "one, two, three",
"caption": "suck it, trebeck",
}
result = self.image_exif.set_image_metadata(TEST_IMAGE_FILENAME,
output_meta)
tools.eq_(result, '1 image files updated\n')
check_tags = self.image_exif._generate_tag_list(output_meta.keys())
stored_tags = self.get_stored_tags(check_tags, TEST_IMAGE_FILENAME)
# now check if the metadata matches
for key, val in output_meta.items():
mapped_key = self.image_exif.metadata_map[key]
tools.eq_(val, stored_tags[mapped_key])
def test_calculate_exposure_time(self):
tag_list = self.image_exif._generate_tag_list(['exposure_time'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
tools.eq_(stored_tags['EXIF:ExposureTime'], 0.001333333333)
def test_get_tags_containing(self):
tag_list = self.image_exif._generate_tag_list(['keywords'])
stored_tags = self.get_stored_tags(tag_list, TEST_PHOTO_01_FILENAME)
result = self.image_exif.get_tags_containing(
stored_tags['IPTC:Keywords'], 'faceit365')
tools.eq_(result, 'faceit365:date=20190308')
def test_get_metadata_batch(self):
fname_list = [TEST_PHOTO_01_FILENAME, TEST_PHOTO_02_FILENAME]
meta_list = self.image_exif.get_metadata_batch(fname_list)
meta_list[0].pop('SourceFile')
meta_list[1].pop('SourceFile')
tools.eq_(meta_list[0], meta_list[1])<|fim▁end|> | with SetExifTool() as et: |
<|file_name|>GameScreen.java<|end_file_name|><|fim▁begin|>package com.cardshifter.gdx.screens;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.Screen;
import com.badlogic.gdx.math.Rectangle;
import com.badlogic.gdx.math.Vector2;
import com.badlogic.gdx.scenes.scene2d.Actor;
import com.badlogic.gdx.scenes.scene2d.InputEvent;
import com.badlogic.gdx.scenes.scene2d.ui.*;
import com.badlogic.gdx.scenes.scene2d.utils.ClickListener;
import com.cardshifter.api.incoming.UseAbilityMessage;
import com.cardshifter.api.messages.Message;
import com.cardshifter.api.outgoing.*;
import com.cardshifter.gdx.*;
import com.cardshifter.gdx.ui.CardshifterClientContext;
import com.cardshifter.gdx.ui.EntityView;
import com.cardshifter.gdx.ui.PlayerView;
import com.cardshifter.gdx.ui.cards.CardView;
import com.cardshifter.gdx.ui.cards.CardViewSmall;
import com.cardshifter.gdx.ui.zones.CompactHiddenZoneView;
import com.cardshifter.gdx.ui.zones.DefaultZoneView;
import com.cardshifter.gdx.ui.zones.ZoneView;
import java.util.*;
import java.util.List;
/**
* Created by Simon on 1/31/2015.
*/
public class GameScreen implements Screen, TargetableCallback {
private final CardshifterGame game;
private final CardshifterClient client;
private final int playerIndex;
private final int gameId;
private final Table table;
private final Map<Integer, ZoneView> zoneViews = new HashMap<Integer, ZoneView>();
private final Map<Integer, EntityView> entityViews = new HashMap<Integer, EntityView>();
private final Map<String, Container<Actor>> holders = new HashMap<String, Container<Actor>>();
private final List<EntityView> targetsSelected = new ArrayList<EntityView>();
private final Screen parentScreen;
private AvailableTargetsMessage targetsAvailable;
private final TargetableCallback onTarget = new TargetableCallback() {
@Override
public boolean addEntity(EntityView view) {
if (targetsSelected.contains(view)) {
targetsSelected.remove(view);
Gdx.app.log("GameScreen", "Removing selection " + view.getId());
view.setTargetable(TargetStatus.TARGETABLE, this);
return false;
}
if (targetsAvailable != null && targetsAvailable.getMax() == 1 && targetsAvailable.getMin() == 1) {
Gdx.app.log("GameScreen", "Sending selection " + view.getId());
client.send(new UseAbilityMessage(gameId, targetsAvailable.getEntity(), targetsAvailable.getAction(), new int[]{ view.getId() }));
return false;
}
Gdx.app.log("GameScreen", "Adding selection " + view.getId());
view.setTargetable(TargetStatus.TARGETED, this);
return targetsSelected.add(view);
}
};
private final CardshifterClientContext context;
//private final float screenWidth;
private final float screenHeight;
public GameScreen(final CardshifterGame game, final CardshifterClient client, NewGameMessage message, final Screen parentScreen) {
this.parentScreen = parentScreen;
this.game = game;
this.client = client;
this.playerIndex = message.getPlayerIndex();
this.gameId = message.getGameId();
this.context = new CardshifterClientContext(game.skin, message.getGameId(), client, game.stage);
//this.screenWidth = CardshifterGame.STAGE_WIDTH;
this.screenHeight = CardshifterGame.STAGE_HEIGHT;
this.table = new Table(game.skin);
Table leftTable = new Table(game.skin);
Table topTable = new Table(game.skin);
//Table rightTable = new Table(game.skin);
Table centerTable = new Table(game.skin);
TextButton backToMenu = new TextButton("Back to menu", game.skin);
backToMenu.addListener(new ClickListener() {
@Override
public void clicked(InputEvent event, float x, float y) {
game.setScreen(parentScreen);
}
});
leftTable.add(backToMenu).expandX().fill().row();
addZoneHolder(leftTable, 1 - this.playerIndex, "").expandY().fillY();
addZoneHolder(leftTable, this.playerIndex, "").expandY().fillY();
leftTable.add("controls").row();
TextButton actionDone = new TextButton("Done", game.skin);
actionDone.addListener(new ClickListener() {
@Override
public void clicked(InputEvent event, float x, float y) {
if (targetsAvailable != null) {
int selected = targetsSelected.size();
if (selected >= targetsAvailable.getMin() && selected <= targetsAvailable.getMax()) {
int[] targets = new int[targetsSelected.size()];
for (int i = 0; i < targets.length; i++) {
targets[i] = targetsSelected.get(i).getId();
}
UseAbilityMessage message = new UseAbilityMessage(gameId, targetsAvailable.getEntity(), targetsAvailable.getAction(), targets);
client.send(message);
}
}
}
});
leftTable.add(actionDone);
topTable.add(leftTable).left().expandY().fillY();
topTable.add(centerTable).center().expandX().expandY().fill();
//topTable.add(rightTable).right().width(150).expandY().fillY();
addZoneHolder(centerTable, 1 - this.playerIndex, "Hand").top().height(this.screenHeight/4);
addZoneHolder(centerTable, 1 - this.playerIndex, "Battlefield").height(this.screenHeight/4);
addZoneHolder(centerTable, this.playerIndex, "Battlefield").height(this.screenHeight/4);
this.table.add(topTable).expand().fill().row();
addZoneHolder(this.table, this.playerIndex, "Hand").height(140).expandX().fill();
this.table.setFillParent(true);
}
private Cell<Container<Actor>> addZoneHolder(Table table, int i, String name) {
Container<Actor> container = new Container<Actor>();
container.setName(name);
// container.fill();
Cell<Container<Actor>> cell = table.add(container).expandX().fillX();<|fim▁hole|> return cell;
}
@Override
public void render(float delta) {
}
@Override
public void resize(int width, int height) {
}
@Override
public void show() {
game.stage.addActor(table);
}
@Override
public void hide() {
table.remove();
}
@Override
public void pause() {
}
@Override
public void resume() {
}
@Override
public void dispose() {
}
public Map<Class<? extends Message>, SpecificHandler<?>> getHandlers() {
Map<Class<? extends Message>, SpecificHandler<?>> handlers =
new HashMap<Class<? extends Message>, SpecificHandler<?>>();
handlers.put(AvailableTargetsMessage.class, new SpecificHandler<AvailableTargetsMessage>() {
@Override
public void handle(AvailableTargetsMessage message) {
targetsAvailable = message;
targetsSelected.clear();
for (EntityView view : entityViews.values()) {
view.setTargetable(TargetStatus.NOT_TARGETABLE, onTarget);
}
for (int id : message.getTargets()) {
EntityView view = entityViews.get(id);
if (view != null) {
view.setTargetable(TargetStatus.TARGETABLE, onTarget);
}
}
}
});
handlers.put(UsableActionMessage.class, new SpecificHandler<UsableActionMessage>() {
@Override
public void handle(UsableActionMessage message) {
int id = message.getId();
EntityView view = entityViews.get(id);
if (view != null) {
view.usableAction(message);
if (view instanceof CardViewSmall) {
((CardViewSmall)view).setUsable(GameScreen.this);
}
}
}
});
handlers.put(CardInfoMessage.class, new SpecificHandler<CardInfoMessage>() {
@Override
public void handle(CardInfoMessage message) {
ZoneView zone = getZoneView(message.getZone());
if (zone != null) {
zone.removeCard(message.getId());
}
EntityView entityView = entityViews.remove(message.getId());
if (entityView != null) {
entityView.remove();
}
if (zone != null) {
entityViews.put(message.getId(), zone.addCard(message));
}
}
});
handlers.put(EntityRemoveMessage.class, new SpecificHandler<EntityRemoveMessage>() {
@Override
public void handle(EntityRemoveMessage message) {
EntityView view = entityViews.get(message.getEntity());
for (ZoneView zone : zoneViews.values()) {
if (zone.hasCard(message.getEntity())) {
zone.removeCard(message.getEntity());
}
}
if (view != null) {
view.entityRemoved();
entityViews.remove(message.getEntity());
}
}
});
handlers.put(GameOverMessage.class, new SpecificHandler<GameOverMessage>() {
@Override
public void handle(GameOverMessage message) {
Dialog dialog = new Dialog("Game Over!", context.getSkin()) {
@Override
protected void result(Object object) {
game.setScreen(parentScreen);
}
};
dialog.button("OK");
dialog.show(context.getStage());
}
});
handlers.put(PlayerMessage.class, new SpecificHandler<PlayerMessage>() {
@Override
public void handle(PlayerMessage message) {
PlayerView playerView = new PlayerView(context, message);
entityViews.put(message.getId(), playerView);
Container<Actor> holder = holders.get(String.valueOf(message.getIndex()));
if (holder != null) {
holder.setActor(playerView.getActor());
}
}
});
handlers.put(ResetAvailableActionsMessage.class, new SpecificHandler<ResetAvailableActionsMessage>() {
@Override
public void handle(ResetAvailableActionsMessage message) {
for (EntityView view : entityViews.values()) {
view.setTargetable(TargetStatus.NOT_TARGETABLE, null);
view.clearUsableActions();
}
}
});
handlers.put(UpdateMessage.class, new SpecificHandler<UpdateMessage>() {
@Override
public void handle(UpdateMessage message) {
EntityView entityView = entityViews.get(message.getId());
if (entityView != null) {
entityView.set(message.getKey(), message.getValue());
}
}
});
handlers.put(ZoneChangeMessage.class, new SpecificHandler<ZoneChangeMessage>() {
@Override
public void handle(ZoneChangeMessage message) {
ZoneView oldZone = getZoneView(message.getSourceZone()); // can be null
ZoneView destinationZone = getZoneView(message.getDestinationZone());
int id = message.getEntity();
CardView entityView = (CardView) entityViews.remove(id); // can be null
if (oldZone != null) {
oldZone.removeCard(id);
}
if (destinationZone != null) {
CardView newCardView = destinationZone.addCard(new CardInfoMessage(message.getDestinationZone(), id,
entityView == null ? null : entityView.getInfo()));
if (entityView != null) {
entityView.zoneMove(message, destinationZone, newCardView);
}
entityViews.put(id, newCardView);
}
else {
if (entityView != null) {
entityView.zoneMove(message, destinationZone, null);
}
}
/*
Send to AI Medium: ZoneChangeMessage [entity=95, sourceZone=72, destinationZone=73]
Send to AI Medium: CardInfo: 95 in zone 73 - {SCRAP=1, TAUNT=1, MAX_HEALTH=1, SICKNESS=1, MANA_COST=2, name=The Chopper, ATTACK=2, creatureType=Mech, HEALTH=1, ATTACK_AVAILABLE=1}
Send to Zomis: ZoneChangeMessage [entity=95, sourceZone=72, destinationZone=73]
if card is already known, send ZoneChange only
if card is not known, send ZoneChange first and then CardInfo
when cards are created from nowhere, ZoneChange with source -1 is sent and then CardInfo
*/
}
});
handlers.put(ZoneMessage.class, new SpecificHandler<ZoneMessage>() {
@Override
public void handle(ZoneMessage message) {
Gdx.app.log("GameScreen", "Zone " + message);
ZoneView zoneView = createZoneView(message);
if (zoneView != null) {
PlayerView view = (PlayerView) entityViews.get(message.getOwner());
if (view == null) {
Gdx.app.log("GameScreen", "no playerView for " + message.getOwner());
return;
}
String key = view.getIndex() + message.getName();
Container<Actor> container = holders.get(key);
if (container == null) {
Gdx.app.log("GameScreen", "no container for " + key);
return;
}
Gdx.app.log("GameScreen", "putting zoneview for " + key);
container.setActor(zoneView.getActor());
zoneViews.put(message.getId(), zoneView);
}
}
});
return handlers;
}
private ZoneView createZoneView(ZoneMessage message) {
String type = message.getName();
if (type.equals("Battlefield")) {
return new DefaultZoneView(context, message, this.entityViews);
}
if (type.equals("Hand")) {
return new DefaultZoneView(context, message, this.entityViews);
}
if (type.equals("Deck")) {
return new CompactHiddenZoneView(game, message);
}
if (type.equals("Cards")) {
return null; // Card models only
}
throw new RuntimeException("Unknown ZoneView type: " + message.getName());
}
private ZoneView getZoneView(int id) {
return this.zoneViews.get(id);
}
public boolean checkCardDrop(CardViewSmall cardView) {
Table table = (Table)cardView.getActor();
Vector2 stageLoc = table.localToStageCoordinates(new Vector2());
Rectangle tableRect = new Rectangle(stageLoc.x, stageLoc.y, table.getWidth(), table.getHeight());
for (Container<Actor> actor : this.holders.values()) {
            if ("Battlefield".equals(actor.getName())) {
Vector2 stageBattlefieldLoc = actor.localToStageCoordinates(new Vector2(actor.getActor().getX(), actor.getActor().getY()));
Vector2 modifiedSBL = new Vector2(stageBattlefieldLoc.x - actor.getWidth()/2, stageBattlefieldLoc.y - actor.getHeight()/2);
Rectangle deckRect = new Rectangle(modifiedSBL.x, modifiedSBL.y, actor.getWidth() * 0.8f, actor.getHeight());
//uncomment this to see the bug where battlefields pop up in strange places
/*
Image squareImage = new Image(new Texture(Gdx.files.internal("cardbg.png")));
squareImage.setPosition(modifiedSBL.x, modifiedSBL.y);
squareImage.setSize(deckRect.width, deckRect.height);
this.game.stage.addActor(squareImage);
*/
if (tableRect.overlaps(deckRect)) {
//this.addEntity(cardView);
System.out.println("target found!");
return true;
}
}
}
return false;
//these can be used to double check the location of the rectangles
/*
Image squareImage = new Image(new Texture(Gdx.files.internal("cardbg.png")));
squareImage.setPosition(modifiedSBL.x, modifiedSBL.y);
squareImage.setSize(deckRect.width, deckRect.height);
this.game.stage.addActor(squareImage);
*/
/*
Image squareImage = new Image(new Texture(Gdx.files.internal("cardbg.png")));
squareImage.setPosition(stageLoc.x, stageLoc.y);
squareImage.setSize(tableRect.width, tableRect.height);
this.game.stage.addActor(squareImage);
*/
}
@Override
public boolean addEntity(EntityView view) {
//called by the CardViewSmall when not in mulligan mode, nothing will happen
return false;
}
}<|fim▁end|> | table.row();
holders.put(i + name, container); |
<|file_name|>social-register.controller.js<|end_file_name|><|fim▁begin|>'use strict';
angular.module('friendflixApp')
.controller('SocialRegisterController', function ($scope, $filter, $stateParams) {
$scope.provider = $stateParams.provider;
$scope.providerLabel = $filter('capitalize')($scope.provider);<|fim▁hole|> $scope.error = !$scope.success;
});<|fim▁end|> | $scope.success = $stateParams.success; |
<|file_name|>About.js<|end_file_name|><|fim▁begin|>import React, {Component} from 'react';
import MiniInfoBar from '../components/MiniInfoBar';
export default class About extends Component {
state = {
showKitten: false
}
handleToggleKitten() {
this.setState({showKitten: !this.state.showKitten});
}
render() {
const {showKitten} = this.state;
const kitten = require('./kitten.jpg');
return (
<div>
<div className="container">
<h1>About Us</h1>
          <p>This project was originally created by Erik Rasmussen
(<a href="https://twitter.com/erikras" target="_blank">@erikras</a>), but has since seen many contributions
from the open source community. Thank you to <a
href="https://github.com/erikras/react-redux-universal-hot-example/graphs/contributors"
target="_blank">all the contributors</a>.
</p>
<h3>Mini Bar <span style={{color: '#aaa'}}>(not that kind)</span></h3>
<p>Hey! You found the mini info bar! The following component is display-only. Note that it shows the same
time as the info bar.</p>
<MiniInfoBar/>
<h3>Images</h3>
<p>
Psst! Would you like to see a kitten?<|fim▁hole|> onClick={::this.handleToggleKitten}>
{showKitten ? 'No! Take it away!' : 'Yes! Please!'}</button>
</p>
{showKitten && <div><img src={kitten}/></div>}
</div>
</div>
);
}
}<|fim▁end|> |
<button className={'btn btn-' + (showKitten ? 'danger' : 'success')}
style={{marginLeft: 50}} |
<|file_name|>format.js<|end_file_name|><|fim▁begin|>var cssbeautify = require('gulp-cssbeautify');
var gulp = require('gulp');
var imagemin = require('gulp-imagemin');
var jsprettify = require('gulp-jsbeautifier');
var path = require('path');
var pngcrush = require('imagemin-pngcrush');
var ROOT = path.join(__dirname, '..');
gulp.task('format-css', function() {
var files = [
'src/**/*.css',
'!src/aui-css/css/*.css'
];
return gulp.src(files, { cwd: ROOT })
.pipe(cssbeautify())
.pipe(gulp.dest(path.join(ROOT, 'src/')));
});
gulp.task('format-js', function() {
var configFile = path.join(ROOT, '.jsbeautifyrc');
var files = [
'src/**/*.js',
'!build/**/*.js',
'!src/aui-base/js/aui-aliases.js',
'!src/aui-base/js/aui-loader.js',
'!src/yui/js/*.js'
];
return gulp.src(files, { cwd: ROOT })
.pipe(jsprettify({
config: configFile
}))
.pipe(gulp.dest(path.join(ROOT, 'src/')));
});
gulp.task('format-img', function() {
return gulp.src('src/**/*.png', { cwd: ROOT })
.pipe(imagemin({<|fim▁hole|> }],
use: [pngcrush()]
}))
.pipe(gulp.dest(path.join(ROOT, 'src/')));
});
gulp.task('format', ['format-css', 'format-js', 'format-img']);<|fim▁end|> | progressive: true,
svgoPlugins: [{
removeViewBox: false |
<|file_name|>sns.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf8 -*-
from pprint import pprint
import sys,os
import random
import json
import gzip
import random
import boto3
session = boto3.Session(
region_name='cn-north-1',
aws_access_key_id='xxxxxxxxxxxxxx',
aws_secret_access_key='xxxxxxxxxxxxxxxxxxxxxx'
)
<|fim▁hole|>
A=sns.create_topic(Name='abc').arn
#print(A)
#res = sns_client.list_topics()
#pprint(res)
#message={"create-time":"2019-04-23-15-59-04","synctoken":"1556035144","md5":"b7a7f68fad03bfe97ae401a6a126192e","url":"https://ip-ranges.amazonaws.com/ip-ranges.json"}
message={"create-time":"2019-04-23-15-59-04","synctoken":"1556035144","md5":"xxxxxxxxxx","url":"https://ip-ranges.amazonaws.com/ip-ranges.json"}
data={'default': json.dumps(message)}
print(json.dumps(data))
res = sns_client.publish(
TopicArn='arn:aws-cn:sns:cn-north-1:048912060910:AmazonIpSpaceChangedTest',
Message=json.dumps(data),
MessageStructure='json'
)
pprint(res)<|fim▁end|> | sns = session.resource('sns')
sns_client = session.client('sns') |
<|file_name|>tok.py<|end_file_name|><|fim▁begin|>class Token:
"""
Class representing a token.
kind: the kind of token, e.g. filename, number, other
value: specific instance value, e.g. "/tmp/foo.c", or 5
offset: byte offset from start of parse string
"""
def __init__(self, kind, value=None, offset=None):
self.offset = offset
self.kind = kind
self.value = value
def __eq__(self, o):
""" '==', but it's okay if offset is different"""
if isinstance(o, Token):
# Both are tokens: compare kind and value
# It's okay if offsets are different
return (self.kind == o.kind)
else:
return self.kind == o
def __repr__(self):
return str(self.kind)
def __repr1__(self, indent, sib_num=''):<|fim▁hole|>
def __str__(self):
return self.format(line_prefix='')
def format(self, line_prefix='', sib_num=None):
if sib_num:
sib_num = "%d." % sib_num
else:
sib_num = ''
prefix = ('%s%s' % (line_prefix, sib_num))
offset_opname = '%5s %-10s' % (self.offset, self.kind)
if not self.value:
return "%s%s" % (prefix, offset_opname)
return "%s%s %s" % (prefix, offset_opname, self.value)
def __hash__(self):
return hash(self.kind)
def __getitem__(self, i):
raise IndexError<|fim▁end|> | return self.format(line_prefix=indent, sib_num=sib_num) |
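A short usage sketch of the Token class (assuming the file is importable as the module tok; the kinds and values below are invented for illustration):

from tok import Token

name_tok = Token("filename", "/tmp/foo.c", offset=0)
num_tok = Token("number", 5, offset=12)

print(name_tok)                                   # format(): offset, kind and value on one line
print(num_tok == "number")                        # True: against a non-Token, only the kind is compared
print(num_tok == Token("number", 7, offset=99))   # also True; value and offset are ignored by __eq__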
<|file_name|>create-group.ts<|end_file_name|><|fim▁begin|>import { getEmailTransport, initializeAdminApp } from './_internal';
import * as functions from 'firebase-functions';
import { District, GroupCreateInput, RepresentativeCreateInput } from '@civ/city-council';
import { createRandomString } from './utils';
const app = initializeAdminApp();
const db = app.database();
const auth = app.auth();
const cors = require('cors')({ origin: true });
export const createGroup = functions.https.onRequest((request, response) => {
cors(request, response, () => {
console.info(`Creating group for input:`);
console.info(request.body);
doCreateGroup(request.body as GroupCreateInput).then(groupId => {
console.info(`Successfully created group ${groupId}`);
response.status(201).send({
success: true,
groupId
});
}).catch(error => {
console.error(`Error creating group: ${JSON.stringify(error)}`);
console.error(error);
response.status(500).send({
success: false,
error
});
})<|fim▁hole|> });
});
export async function doCreateGroup(groupInput: GroupCreateInput) {
const groupId = await pushBasicInfo(groupInput.name, groupInput.icon, groupInput.adminId);
console.info(`pushed basic info - new group id: ${groupId}`);
const repDataMap = groupInput.representatives.reduce((result, repData) => ({ ...result, [repData.id]: repData }), {});
console.info(`creating rep user accounts`);
//create user accounts for each representative,
const repPushes: Promise<{ inputId: string, outputId: string }[]> =
Promise.all(
groupInput.representatives.map(repData => new Promise((resolve, reject) => {
return createUserAccountForRepresentative(repData, { id: groupId, name: groupInput.name })
.then(outputId => resolve({ inputId: repData.id, outputId }))
.catch(err => reject(err))
})
)
);
const repIdMap: { [inputId: string]: string } = (await repPushes).reduce(
(result, entry: any) => ({ ...result, [entry.inputId]: entry.outputId }),
{});
console.info(`adding rep info to group`);
//push representative objects (keyed by user id) to group
await Promise.all(Object.keys(repIdMap)
.map(inputId => addRepresentativeInfoToGroup(repIdMap[ inputId ], repDataMap[ inputId ], groupId))
);
console.info(`creating districts`);
//create districts, linking to representatives by correct userId
const districts = await Promise.all(groupInput.districts.map(data => new Promise((resolve, reject) =>
createDistrict(groupId, data.name, repIdMap[ data.representative ])
.then(id => resolve({ ...data, id }))
)));
//finally, update rep user objects to attach them to their respective districts
console.info('updating rep user groups');
await Promise.all(districts.map((district: District) => {
let repId = repIdMap[ district.representative ];
return updateRepresentativeGroups(repId, { id: groupId, name: groupInput.name }, {
id: district.id,
name: district.name
})
}));
return groupId;
}
async function createDistrict(groupId: string, name: string, representative: string) {
const result = await db.ref(`/group/${groupId}/districts`).push({
name,
representative
});
return result.key;
}
async function addRepresentativeInfoToGroup(repId: string, repData: RepresentativeCreateInput, groupId: string) {
return await db.ref(`/group/${groupId}/representatives`).update({
[repId]: {
firstName: repData.firstName,
lastName: repData.lastName,
icon: repData.icon,
email: repData.email,
title: repData.title
}
})
}
async function pushBasicInfo(name: string, icon: string, owner: string): Promise<string> {
console.info(`pushing basic info: {name: ${name}, icon: ${icon}, owner: ${owner}`);
const result = await db.ref(`/group`).push({ name, icon, owner });
return result.key;
}
async function createUserAccountForRepresentative(input: RepresentativeCreateInput,
group: { id: string, name: string },
district?: { id: string, name: string }): Promise<string> {
let password = createRandomString(),
userId: string;
try {
userId = await createAuthAccount(input.email, password);
console.info('DONE creating auth account');
} catch (err) {
console.error('ERROR creating auth account');
console.error(err);
throw new Error(`Error creating auth account: ${JSON.stringify(err)}`);
}
try {
await createUserPrivateEntry(userId, input.email);
} catch (err) {
console.error('ERROR creating user private entry');
throw new Error(`Error creating userPrivate entry: ${JSON.stringify(err)}`);
}
console.info('DONE creating user private entry');
try {
await createUserPublicEntry(userId, input.firstName, input.lastName, input.icon);
console.info('DONE creating user public entry');
} catch (err) {
console.error('ERROR creating user public entry');
console.error(err);
throw new Error(`Error creating userPublic entry: ${JSON.stringify(err)}`);
}
try {
await sendRepresentativeEmail('[email protected]', password, input.firstName, group.name);
console.info(`DONE sending email to ${input.email}`);
} catch (err) {
console.error(`ERROR sending email to ${input.email}`);
console.error(err);
/* if (err){
throw new Error(`Error sending representative email: ${JSON.stringify(err)}`);
}*/
}
console.info(`DONE creating rep account for ${input.firstName} ${input.lastName}`);
return userId;
async function createAuthAccount(email: string, password: string): Promise<string> {
const result = await auth.createUser({
email, password, emailVerified: true
});
return result.uid;
}
async function createUserPrivateEntry(id: string, email: string) {
return await db.ref(`/user_private/${id}`).set({
email, isVerified: true
});
}
async function createUserPublicEntry(id: string, firstName: string, lastName: string, icon: string) {
return await db.ref(`/user/${id}`).set({
firstName,
lastName,
icon
})
}
function sendRepresentativeEmail(email: string, password: string, name: string, groupName: string) {
const msg = {
to: email,
subject: `Your new Civinomics Account`,
html: `
<div>
<p>Greetings, ${name}</p>
<p>${groupName} has recently begun using Civinomics, and you were listed as a representative. </p>
<p>A new account has been created for you - you can sign in <a href="https://civinomics.com/log-in">here</a> using the following credentials: </p>
      </div>
<strong>email:</strong> ${email}
<strong>temporary password: </strong> ${password}
</div>
<div>
<p>
If you have any questions, don't hesitate to contact us at <a href="mailto:[email protected]">[email protected]</a>
</p>
<p>Look forward to seeing you online! </p>
<p>-Team Civinomics</p>
</div>
`
};
return new Promise((resolve, reject) => {
const transport = getEmailTransport();
transport.sendMail(msg, (err, info) => {
if (err) {
reject(err);
return;
}
resolve(info);
console.log(`sent: ${JSON.stringify(info)}`);
});
});
}
}
async function updateRepresentativeGroups(repId: string, group: { id: string, name: string }, district?: { id: string, name: string }) {
let obj: any = {
[group.id]: {
name: group.name,
role: 'representative',
district
}
};
if (district) {
obj.district = { id: district.id, name: district.name }
}
return await db.ref(`/user/${repId}/groups`).update(obj);
}<|fim▁end|> | |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Copyright (C) 2009-2013 Roman Zimbelmann <[email protected]>
# This software is distributed under the terms of the GNU GPL version 3.
from inspect import isfunction
from ranger.ext.signals import SignalDispatcher, Signal
from ranger.core.shared import FileManagerAware
from ranger.gui.colorscheme import _colorscheme_name_to_class
import re
import os.path
ALLOWED_SETTINGS = {
'automatically_count_files': bool,
'autosave_bookmarks': bool,
'autoupdate_cumulative_size': bool,
'cd_bookmarks': bool,
'collapse_preview': bool,
'colorscheme': str,
'column_ratios': (tuple, list),
'confirm_on_delete': str,
'dirname_in_tabs': bool,
'display_size_in_main_column': bool,
'display_size_in_status_bar': bool,
'display_tags_in_all_columns': bool,
'draw_borders': bool,
'draw_progress_bar_in_status_bar': bool,
'flushinput': bool,
'hidden_filter': str,
'idle_delay': int,
'max_console_history_size': (int, type(None)),
'max_history_size': (int, type(None)),
'mouse_enabled': bool,
'open_all_images': bool,
'padding_right': bool,
'preview_directories': bool,
'preview_files': bool,
'preview_images': bool,
'preview_max_size': int,
'preview_script': (str, type(None)),
'save_console_history': bool,
'scroll_offset': int,
'shorten_title': int,
'show_cursor': bool, # TODO: not working?
'show_selection_in_titlebar': bool,
'show_hidden_bookmarks': bool,
'show_hidden': bool,
'sort_case_insensitive': bool,
'sort_directories_first': bool,
'sort_reverse': bool,
'sort': str,
'status_bar_on_top': bool,
'tilde_in_titlebar': bool,
'unicode_ellipsis': bool,
'update_title': bool,
'update_tmux_title': bool,
'use_preview_script': bool,
'vcs_aware': bool,
'vcs_backend_bzr': str,
'vcs_backend_git': str,
'vcs_backend_hg': str,
'xterm_alt_key': bool,
}
DEFAULT_VALUES = {
bool: False,
type(None): None,
str: "",
int: 0,
list: [],
tuple: tuple([]),
}
class Settings(SignalDispatcher, FileManagerAware):
def __init__(self):
SignalDispatcher.__init__(self)
self.__dict__['_localsettings'] = dict()
self.__dict__['_localregexes'] = dict()
self.__dict__['_tagsettings'] = dict()
self.__dict__['_settings'] = dict()
for name in ALLOWED_SETTINGS:
self.signal_bind('setopt.'+name,
self._sanitize, priority=1.0)
self.signal_bind('setopt.'+name,
self._raw_set_with_signal, priority=0.2)
def _sanitize(self, signal):
name, value = signal.setting, signal.value
if name == 'column_ratios':
# TODO: cover more cases here
if isinstance(value, tuple):
signal.value = list(value)
if not isinstance(value, list) or len(value) < 2:
signal.value = [1, 1]
else:
signal.value = [int(i) if str(i).isdigit() else 1 \
for i in value]
elif name == 'colorscheme':
_colorscheme_name_to_class(signal)
elif name == 'preview_script':
if isinstance(value, str):
result = os.path.expanduser(value)
if os.path.exists(result):
signal.value = result
else:
signal.value = None
elif name == 'use_preview_script':
if self._settings['preview_script'] is None and value \
and self.fm.ui.is_on:
self.fm.notify("Preview script undefined or not found!",
bad=True)
def set(self, name, value, path=None, tags=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if name not in self._settings:
previous = None
else:
previous=self._settings[name]
assert self._check_type(name, value)
assert not (tags and path), "Can't set a setting for path and tag " \
"at the same time!"
kws = dict(setting=name, value=value, previous=previous,
path=path, tags=tags, fm=self.fm)
self.signal_emit('setopt', **kws)
self.signal_emit('setopt.'+name, **kws)
def get(self, name, path=None):
assert name in ALLOWED_SETTINGS, "No such setting: {0}!".format(name)
if path:
localpath = path
else:
try:
localpath = self.fm.thisdir.path
except:
localpath = path
if localpath:
for pattern, regex in self._localregexes.items():
if name in self._localsettings[pattern] and\
regex.search(localpath):
return self._localsettings[pattern][name]
if self._tagsettings and path:
realpath = os.path.realpath(path)
if realpath in self.fm.tags:
tag = self.fm.tags.marker(realpath)
if tag in self._tagsettings and name in self._tagsettings[tag]:<|fim▁hole|> type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._raw_set(name, value)
self.__setattr__(name, value)
return self._settings[name]
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self.set(name, value, None)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self.get(name, None)
def __iter__(self):
for x in self._settings:
yield x
def types_of(self, name):
try:
typ = ALLOWED_SETTINGS[name]
except KeyError:
return tuple()
else:
if isinstance(typ, tuple):
return typ
else:
return (typ, )
def _check_type(self, name, value):
typ = ALLOWED_SETTINGS[name]
if isfunction(typ):
assert typ(value), \
"Warning: The option `" + name + "' has an incorrect type!"
else:
assert isinstance(value, typ), \
"Warning: The option `" + name + "' has an incorrect type!"\
" Got " + str(type(value)) + ", expected " + str(typ) + "!" +\
" Please check if your commands.py is up to date." if not \
self.fm.ui.is_set_up else ""
return True
__getitem__ = __getattr__
__setitem__ = __setattr__
def _raw_set(self, name, value, path=None, tags=None):
if path:
if not path in self._localsettings:
try:
regex = re.compile(path)
except:
# Bad regular expression
return
self._localregexes[path] = regex
self._localsettings[path] = dict()
self._localsettings[path][name] = value
# make sure name is in _settings, so __iter__ runs through
# local settings too.
if not name in self._settings:
type_ = self.types_of(name)[0]
value = DEFAULT_VALUES[type_]
self._settings[name] = value
elif tags:
for tag in tags:
if tag not in self._tagsettings:
self._tagsettings[tag] = dict()
self._tagsettings[tag][name] = value
else:
self._settings[name] = value
def _raw_set_with_signal(self, signal):
self._raw_set(signal.setting, signal.value, signal.path, signal.tags)
class LocalSettings():
def __init__(self, path, parent):
self.__dict__['_parent'] = parent
self.__dict__['_path'] = path
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self._parent.set(name, value, self._path)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return self._parent.get(name, self._path)
def __iter__(self):
for x in self._parent._settings:
yield x
__getitem__ = __getattr__
__setitem__ = __setattr__<|fim▁end|> | return self._tagsettings[tag][name]
if name in self._settings:
return self._settings[name]
else: |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
import datetime
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from iati_synchroniser.dataset_syncer import DatasetSyncer
from iati_synchroniser.codelist_importer import CodeListImporter
from iati.parser import Parser
from iati_synchroniser.admin_tools import AdminTools
INTERVAL_CHOICES = (
(u'YEARLY', _(u"Parse yearly")),
(u'MONTHLY', _(u"Parse monthly")),
(u'WEEKLY', _(u"Parse weekly")),
(u'DAILY', _(u"Parse daily")),
)
class Publisher(models.Model):
org_id = models.CharField(max_length=100, blank=True, null=True)
org_abbreviate = models.CharField(max_length=55, blank=True, null=True)
org_name = models.CharField(max_length=255)
default_interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES, default=u'MONTHLY')
XML_total_activity_count = models.IntegerField(null=True, default=None)
OIPA_total_activity_count = models.IntegerField(null=True, default=None)
def __unicode__(self):
return self.org_id
class IatiXmlSource(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
INTERVAL_CHOICES = (
("day", _(u"Day")),
("week", _(u"Week")),
("month", _(u"Month")),
("year", _(u"Year")),
)
ref = models.CharField(verbose_name=_(u"Reference"), max_length=70, help_text=_(u"Reference for the XML file. Preferred usage: 'collection' or single country or region name"))
title = models.CharField(max_length=255, null=True)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
publisher = models.ForeignKey(Publisher)
source_url = models.CharField(max_length=255, unique=True, help_text=_(u"Hyperlink to an iati activity or organisation XML file."))
date_created = models.DateTimeField(auto_now_add=True, editable=False)
date_updated = models.DateTimeField(auto_now_add=True, editable=False)
update_interval = models.CharField(max_length=20, choices=INTERVAL_CHOICES, default="month", null=True, blank=True)
last_found_in_registry = models.DateTimeField(default=None, null=True)
xml_activity_count = models.IntegerField(null=True, default=None)
oipa_activity_count = models.IntegerField(null=True, default=None)
iati_standard_version = models.CharField(max_length=10, null=True, default=None)
class Meta:
verbose_name_plural = "iati XML sources"
ordering = ["ref"]
def __unicode__(self):
return self.ref
def get_parse_status(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-xml='xml_%i' class='parse'><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
get_parse_status.allow_tags = True
get_parse_status.short_description = _(u"Parse status")
def process(self):
parser = Parser()
parser.parse_url(self.source_url, self.ref)
self.date_updated = datetime.datetime.now()
activity_counter = AdminTools()
self.xml_activity_count = activity_counter.get_xml_activity_amount(self.source_url)
self.oipa_activity_count = activity_counter.get_oipa_activity_amount(self.ref)
self.save(process=False)
def save(self, process=True, *args, **kwargs):
        super(IatiXmlSource, self).save(*args, **kwargs)
if process:
self.process()
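    # Descriptive note (added for clarity): saving with the default process=True
    # triggers a parse of source_url, and process() then persists the refreshed
    # counters via save(process=False), so the two methods do not recurse.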
class DatasetSync(models.Model):
TYPE_CHOICES = (
(1, _(u"Activity Files")),
(2, _(u"Organisation Files")),
)
interval = models.CharField(verbose_name=_(u"Interval"), max_length=55, choices=INTERVAL_CHOICES)
date_updated = models.DateTimeField(auto_now=True, editable=False)
type = models.IntegerField(choices=TYPE_CHOICES, default=1)
def __unicode__(self):
return self.interval
class Meta:
verbose_name_plural = "dataset synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
    def _add_month(self, d, months=1):
year, month, day = d.timetuple()[:3]
new_month = month + months
        return datetime.date(year + ((new_month - 1) // 12), (new_month - 1) % 12 + 1, day)
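    # Illustrative example (not part of the original source): _add_month(date(2013, 12, 15))
    # returns date(2014, 1, 15). The helper assumes the day of month also exists in the
    # target month; otherwise datetime.date raises ValueError.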
def process(self):<|fim▁hole|> if self.interval == u'YEARLY' and (self._add_month(self.date_updated, 12) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'MONTHLY' and (self._add_month(self.date_updated) <= datetime.datetime.now().date()):
self.sync_dataset_with_iati_api()
elif self.interval == u'WEEKLY' and (self.date_updated+datetime.timedelta(7) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
elif self.interval == u'DAILY' and (self.date_updated+datetime.timedelta(1) <= datetime.datetime.today()):
self.sync_dataset_with_iati_api()
def sync_dataset_with_iati_api(self):
syncer = DatasetSyncer()
syncer.synchronize_with_iati_api(self.type)
class CodelistSync(models.Model):
date_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
verbose_name_plural = "codelist synchronisers"
def sync_now(self):
return mark_safe("<img class='loading' src='/static/img/loading.gif' alt='loading' style='display:none;' /><a data-sync='sync_%i' class='sync '><img src='/static/img/utils.parse.png' style='cursor:pointer;' /></a>") % self.id
sync_now.allow_tags = True
sync_now.short_description = _(u"Sync now?")
def sync_codelist(self):
syncer = CodeListImporter()
syncer.synchronise_with_codelists()<|fim▁end|> | |
<|file_name|>qgsogcutils.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************
qgsogcutils.cpp
---------------------
begin : March 2013
copyright : (C) 2013 by Martin Dobias
email : wonder dot sk at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
#include "qgsogcutils.h"
#include "qgsexpression.h"
#include "qgsexpressionnodeimpl.h"
#include "qgsexpressionfunction.h"
#include "qgsexpressionprivate.h"
#include "qgsgeometry.h"
#include "qgswkbptr.h"
#include "qgscoordinatereferencesystem.h"
#include "qgsrectangle.h"
#include "qgsvectorlayer.h"
#include "qgsexpressioncontextutils.h"
#include <QColor>
#include <QStringList>
#include <QTextStream>
#include <QObject>
#ifndef Q_OS_WIN
#include <netinet/in.h>
#else
#include <winsock.h>
#endif
static const QString GML_NAMESPACE = QStringLiteral( "http://www.opengis.net/gml" );
static const QString GML32_NAMESPACE = QStringLiteral( "http://www.opengis.net/gml/3.2" );
static const QString OGC_NAMESPACE = QStringLiteral( "http://www.opengis.net/ogc" );
static const QString FES_NAMESPACE = QStringLiteral( "http://www.opengis.net/fes/2.0" );
QgsOgcUtilsExprToFilter::QgsOgcUtilsExprToFilter( QDomDocument &doc,
QgsOgcUtils::GMLVersion gmlVersion,
QgsOgcUtils::FilterVersion filterVersion,
const QString &geometryName,
const QString &srsName,
bool honourAxisOrientation,
bool invertAxisOrientation )
: mDoc( doc )
, mGMLUsed( false )
, mGMLVersion( gmlVersion )
, mFilterVersion( filterVersion )
, mGeometryName( geometryName )
, mSrsName( srsName )
, mInvertAxisOrientation( invertAxisOrientation )
, mFilterPrefix( ( filterVersion == QgsOgcUtils::FILTER_FES_2_0 ) ? "fes" : "ogc" )
, mPropertyName( ( filterVersion == QgsOgcUtils::FILTER_FES_2_0 ) ? "ValueReference" : "PropertyName" )
, mGeomId( 1 )
{
QgsCoordinateReferenceSystem crs;
if ( !mSrsName.isEmpty() )
crs = QgsCoordinateReferenceSystem::fromOgcWmsCrs( mSrsName );
if ( crs.isValid() )
{
if ( honourAxisOrientation && crs.hasAxisInverted() )
{
mInvertAxisOrientation = !mInvertAxisOrientation;
}
}
}
QgsGeometry QgsOgcUtils::geometryFromGML( const QDomNode &geometryNode )
{
QDomElement geometryTypeElement = geometryNode.toElement();
QString geomType = geometryTypeElement.tagName();
if ( !( geomType == QLatin1String( "Point" ) || geomType == QLatin1String( "LineString" ) || geomType == QLatin1String( "Polygon" ) ||
geomType == QLatin1String( "MultiPoint" ) || geomType == QLatin1String( "MultiLineString" ) || geomType == QLatin1String( "MultiPolygon" ) ||
geomType == QLatin1String( "Box" ) || geomType == QLatin1String( "Envelope" ) ) )
{
QDomNode geometryChild = geometryNode.firstChild();
if ( geometryChild.isNull() )
{
return QgsGeometry();
}
geometryTypeElement = geometryChild.toElement();
geomType = geometryTypeElement.tagName();
}
if ( !( geomType == QLatin1String( "Point" ) || geomType == QLatin1String( "LineString" ) || geomType == QLatin1String( "Polygon" ) ||
geomType == QLatin1String( "MultiPoint" ) || geomType == QLatin1String( "MultiLineString" ) || geomType == QLatin1String( "MultiPolygon" ) ||
geomType == QLatin1String( "Box" ) || geomType == QLatin1String( "Envelope" ) ) )
return QgsGeometry();
if ( geomType == QLatin1String( "Point" ) )
{
return geometryFromGMLPoint( geometryTypeElement );
}
else if ( geomType == QLatin1String( "LineString" ) )
{
return geometryFromGMLLineString( geometryTypeElement );
}
else if ( geomType == QLatin1String( "Polygon" ) )
{
return geometryFromGMLPolygon( geometryTypeElement );
}
else if ( geomType == QLatin1String( "MultiPoint" ) )
{
return geometryFromGMLMultiPoint( geometryTypeElement );
}
else if ( geomType == QLatin1String( "MultiLineString" ) )
{
return geometryFromGMLMultiLineString( geometryTypeElement );
}
else if ( geomType == QLatin1String( "MultiPolygon" ) )
{
return geometryFromGMLMultiPolygon( geometryTypeElement );
}
else if ( geomType == QLatin1String( "Box" ) )
{
return QgsGeometry::fromRect( rectangleFromGMLBox( geometryTypeElement ) );
}
else if ( geomType == QLatin1String( "Envelope" ) )
{
return QgsGeometry::fromRect( rectangleFromGMLEnvelope( geometryTypeElement ) );
}
else //unknown type
{
return QgsGeometry();
}
}
QgsGeometry QgsOgcUtils::geometryFromGML( const QString &xmlString )
{
// wrap the string into a root tag to have "gml" namespace (and also as a default namespace)
QString xml = QStringLiteral( "<tmp xmlns=\"%1\" xmlns:gml=\"%1\">%2</tmp>" ).arg( GML_NAMESPACE, xmlString );
QDomDocument doc;
if ( !doc.setContent( xml, true ) )
return QgsGeometry();
return geometryFromGML( doc.documentElement().firstChildElement() );
}
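// Usage sketch (illustrative only): thanks to the wrapper element created above, a bare
// fragment without namespace declarations can be parsed directly, e.g.
//   QgsGeometry g = QgsOgcUtils::geometryFromGML( QStringLiteral( "<Point><coordinates>1,2</coordinates></Point>" ) );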
QgsGeometry QgsOgcUtils::geometryFromGMLPoint( const QDomElement &geometryElement )
{
QgsPolylineXY pointCoordinate;
QDomNodeList coordList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( !coordList.isEmpty() )
{
QDomElement coordElement = coordList.at( 0 ).toElement();
if ( readGMLCoordinates( pointCoordinate, coordElement ) != 0 )
{
return QgsGeometry();
}
}
else
{
QDomNodeList posList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "pos" ) );
if ( posList.size() < 1 )
{
return QgsGeometry();
}
QDomElement posElement = posList.at( 0 ).toElement();
if ( readGMLPositions( pointCoordinate, posElement ) != 0 )
{
return QgsGeometry();
}
}
if ( pointCoordinate.empty() )
{
return QgsGeometry();
}
QgsPolylineXY::const_iterator point_it = pointCoordinate.constBegin();
char e = htonl( 1 ) != 1;
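  // htonl( 1 ) != 1 evaluates to 1 on little-endian hosts and 0 on big-endian hosts,
  // matching the WKB byte-order flag (1 = NDR / little endian, 0 = XDR / big endian).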
double x = point_it->x();
double y = point_it->y();
int size = 1 + sizeof( int ) + 2 * sizeof( double );
QgsWkbTypes::Type type = QgsWkbTypes::Point;
unsigned char *wkb = new unsigned char[size];
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
QgsGeometry QgsOgcUtils::geometryFromGMLLineString( const QDomElement &geometryElement )
{
QgsPolylineXY lineCoordinates;
QDomNodeList coordList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( !coordList.isEmpty() )
{
QDomElement coordElement = coordList.at( 0 ).toElement();
if ( readGMLCoordinates( lineCoordinates, coordElement ) != 0 )
{
return QgsGeometry();
}
}
else
{
QDomNodeList posList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "posList" ) );
if ( posList.size() < 1 )
{
return QgsGeometry();
}
QDomElement posElement = posList.at( 0 ).toElement();
if ( readGMLPositions( lineCoordinates, posElement ) != 0 )
{
return QgsGeometry();
}
}
char e = htonl( 1 ) != 1;
int size = 1 + 2 * sizeof( int ) + lineCoordinates.size() * 2 * sizeof( double );
QgsWkbTypes::Type type = QgsWkbTypes::LineString;
unsigned char *wkb = new unsigned char[size];
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
double x, y;
int nPoints = lineCoordinates.size();
//fill the contents into *wkb
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &nPoints, sizeof( int ) );
wkbPosition += sizeof( int );
QgsPolylineXY::const_iterator iter;
for ( iter = lineCoordinates.constBegin(); iter != lineCoordinates.constEnd(); ++iter )
{
x = iter->x();
y = iter->y();
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
wkbPosition += sizeof( double );
}
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
QgsGeometry QgsOgcUtils::geometryFromGMLPolygon( const QDomElement &geometryElement )
{
//read all the coordinates (as QgsPoint) into memory. Each linear ring has an entry in the vector
QgsMultiPolylineXY ringCoordinates;
//read coordinates for outer boundary
QgsPolylineXY exteriorPointList;
QDomNodeList outerBoundaryList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "outerBoundaryIs" ) );
if ( !outerBoundaryList.isEmpty() ) //outer ring is necessary
{
QDomElement coordinatesElement = outerBoundaryList.at( 0 ).firstChild().firstChild().toElement();
if ( coordinatesElement.isNull() )
{
return QgsGeometry();
}
if ( readGMLCoordinates( exteriorPointList, coordinatesElement ) != 0 )
{
return QgsGeometry();
}
ringCoordinates.push_back( exteriorPointList );
//read coordinates for inner boundary
QDomNodeList innerBoundaryList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "innerBoundaryIs" ) );
for ( int i = 0; i < innerBoundaryList.size(); ++i )
{
QgsPolylineXY interiorPointList;
coordinatesElement = innerBoundaryList.at( i ).firstChild().firstChild().toElement();
if ( coordinatesElement.isNull() )
{
return QgsGeometry();
}
if ( readGMLCoordinates( interiorPointList, coordinatesElement ) != 0 )
{
return QgsGeometry();
}
ringCoordinates.push_back( interiorPointList );
}
}
else
{
//read coordinates for exterior
QDomNodeList exteriorList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "exterior" ) );
if ( exteriorList.size() < 1 ) //outer ring is necessary
{
return QgsGeometry();
}
QDomElement posElement = exteriorList.at( 0 ).firstChild().firstChild().toElement();
if ( posElement.isNull() )
{
return QgsGeometry();
}
if ( readGMLPositions( exteriorPointList, posElement ) != 0 )
{
return QgsGeometry();
}
ringCoordinates.push_back( exteriorPointList );
//read coordinates for inner boundary
QDomNodeList interiorList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "interior" ) );
for ( int i = 0; i < interiorList.size(); ++i )
{
QgsPolylineXY interiorPointList;
QDomElement posElement = interiorList.at( i ).firstChild().firstChild().toElement();
if ( posElement.isNull() )
{
return QgsGeometry();
}
if ( readGMLPositions( interiorPointList, posElement ) != 0 )
{
return QgsGeometry();
}
ringCoordinates.push_back( interiorPointList );
}
}
//calculate number of bytes to allocate
int nrings = ringCoordinates.size();
if ( nrings < 1 )
return QgsGeometry();
int npoints = 0;//total number of points
for ( QgsMultiPolylineXY::const_iterator it = ringCoordinates.constBegin(); it != ringCoordinates.constEnd(); ++it )
{
npoints += it->size();
}
int size = 1 + 2 * sizeof( int ) + nrings * sizeof( int ) + 2 * npoints * sizeof( double );
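  // Layout of the 2D polygon WKB assembled below:
  //   [1 byte byte order][4 byte geometry type][4 byte ring count]
  //   then, per ring: [4 byte point count][2 * 8 bytes per point]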
QgsWkbTypes::Type type = QgsWkbTypes::Polygon;
unsigned char *wkb = new unsigned char[size];
//char e = QgsApplication::endian();
char e = htonl( 1 ) != 1;
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
int nPointsInRing = 0;
double x, y;
//fill the contents into *wkb
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &nrings, sizeof( int ) );
wkbPosition += sizeof( int );
for ( QgsMultiPolylineXY::const_iterator it = ringCoordinates.constBegin(); it != ringCoordinates.constEnd(); ++it )
{
nPointsInRing = it->size();
memcpy( &( wkb )[wkbPosition], &nPointsInRing, sizeof( int ) );
wkbPosition += sizeof( int );
//iterate through the string list converting the strings to x-/y- doubles
QgsPolylineXY::const_iterator iter;
for ( iter = it->begin(); iter != it->end(); ++iter )
{
x = iter->x();
y = iter->y();
//qWarning("currentCoordinate: " + QString::number(x) + " // " + QString::number(y));
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
wkbPosition += sizeof( double );
}
}
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
QgsGeometry QgsOgcUtils::geometryFromGMLMultiPoint( const QDomElement &geometryElement )
{
QgsPolylineXY pointList;
QgsPolylineXY currentPoint;
QDomNodeList pointMemberList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "pointMember" ) );
if ( pointMemberList.size() < 1 )
{
return QgsGeometry();
}
QDomNodeList pointNodeList;
// coordinates or pos element
QDomNodeList coordinatesList;
QDomNodeList posList;
for ( int i = 0; i < pointMemberList.size(); ++i )
{
//<Point> element
pointNodeList = pointMemberList.at( i ).toElement().elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "Point" ) );
if ( pointNodeList.size() < 1 )
{
continue;
}
//<coordinates> element
coordinatesList = pointNodeList.at( 0 ).toElement().elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( !coordinatesList.isEmpty() )
{
currentPoint.clear();
if ( readGMLCoordinates( currentPoint, coordinatesList.at( 0 ).toElement() ) != 0 )
{
continue;
}
if ( currentPoint.empty() )
{
continue;
}
pointList.push_back( ( *currentPoint.begin() ) );
continue;
}
else
{
//<pos> element
posList = pointNodeList.at( 0 ).toElement().elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "pos" ) );
if ( posList.size() < 1 )
{
continue;
}
currentPoint.clear();
if ( readGMLPositions( currentPoint, posList.at( 0 ).toElement() ) != 0 )
{
continue;
}
if ( currentPoint.empty() )
{
continue;
}
pointList.push_back( ( *currentPoint.begin() ) );
}
}
int nPoints = pointList.size(); //number of points
if ( nPoints < 1 )
return QgsGeometry();
//calculate the required wkb size
int size = 1 + 2 * sizeof( int ) + pointList.size() * ( 2 * sizeof( double ) + 1 + sizeof( int ) );
QgsWkbTypes::Type type = QgsWkbTypes::MultiPoint;
unsigned char *wkb = new unsigned char[size];
//fill the wkb content
char e = htonl( 1 ) != 1;
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
double x, y;
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &nPoints, sizeof( int ) );
wkbPosition += sizeof( int );
type = QgsWkbTypes::Point;
for ( QgsPolylineXY::const_iterator it = pointList.constBegin(); it != pointList.constEnd(); ++it )
{
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
x = it->x();
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
y = it->y();
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
wkbPosition += sizeof( double );
}
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
QgsGeometry QgsOgcUtils::geometryFromGMLMultiLineString( const QDomElement &geometryElement )
{
  // GeoServer nests each line in a member element:
  //   <gml:MultiLineString>
  //     <gml:lineStringMember>
  //       <gml:LineString>
  // MapServer places <gml:LineString> directly under <gml:MultiLineString>
QList< QgsPolylineXY > lineCoordinates; //first list: lines, second list: points of one line
QDomElement currentLineStringElement;
QDomNodeList currentCoordList;
QDomNodeList currentPosList;
QDomNodeList lineStringMemberList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "lineStringMember" ) );
if ( !lineStringMemberList.isEmpty() ) //geoserver
{
for ( int i = 0; i < lineStringMemberList.size(); ++i )
{
QDomNodeList lineStringNodeList = lineStringMemberList.at( i ).toElement().elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LineString" ) );
if ( lineStringNodeList.size() < 1 )
{
return QgsGeometry();
}
currentLineStringElement = lineStringNodeList.at( 0 ).toElement();
currentCoordList = currentLineStringElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( !currentCoordList.isEmpty() )
{
QgsPolylineXY currentPointList;
if ( readGMLCoordinates( currentPointList, currentCoordList.at( 0 ).toElement() ) != 0 )
{
return QgsGeometry();
}
lineCoordinates.push_back( currentPointList );
}
else
{
currentPosList = currentLineStringElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "posList" ) );
if ( currentPosList.size() < 1 )
{
return QgsGeometry();
}
QgsPolylineXY currentPointList;
if ( readGMLPositions( currentPointList, currentPosList.at( 0 ).toElement() ) != 0 )
{
return QgsGeometry();
}
lineCoordinates.push_back( currentPointList );
}
}
}
else
{
QDomNodeList lineStringList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LineString" ) );
if ( !lineStringList.isEmpty() ) //mapserver
{
for ( int i = 0; i < lineStringList.size(); ++i )
{
currentLineStringElement = lineStringList.at( i ).toElement();
currentCoordList = currentLineStringElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( !currentCoordList.isEmpty() )
{
QgsPolylineXY currentPointList;
if ( readGMLCoordinates( currentPointList, currentCoordList.at( 0 ).toElement() ) != 0 )
{
return QgsGeometry();
}
lineCoordinates.push_back( currentPointList );
}
else
{
currentPosList = currentLineStringElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "posList" ) );
if ( currentPosList.size() < 1 )
{
return QgsGeometry();
}
QgsPolylineXY currentPointList;
if ( readGMLPositions( currentPointList, currentPosList.at( 0 ).toElement() ) != 0 )
{
return QgsGeometry();
}
lineCoordinates.push_back( currentPointList );
}
}
}
else
{
return QgsGeometry();
}
}
int nLines = lineCoordinates.size();
if ( nLines < 1 )
return QgsGeometry();
//calculate the required wkb size
int size = ( lineCoordinates.size() + 1 ) * ( 1 + 2 * sizeof( int ) );
for ( QList< QgsPolylineXY >::const_iterator it = lineCoordinates.constBegin(); it != lineCoordinates.constEnd(); ++it )
{
size += it->size() * 2 * sizeof( double );
}
QgsWkbTypes::Type type = QgsWkbTypes::MultiLineString;
unsigned char *wkb = new unsigned char[size];
//fill the wkb content
char e = htonl( 1 ) != 1;
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
int nPoints; //number of points in a line
double x, y;
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &nLines, sizeof( int ) );
wkbPosition += sizeof( int );
type = QgsWkbTypes::LineString;
for ( QList< QgsPolylineXY >::const_iterator it = lineCoordinates.constBegin(); it != lineCoordinates.constEnd(); ++it )
{
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
nPoints = it->size();
memcpy( &( wkb )[wkbPosition], &nPoints, sizeof( int ) );
wkbPosition += sizeof( int );
for ( QgsPolylineXY::const_iterator iter = it->begin(); iter != it->end(); ++iter )
{
x = iter->x();
y = iter->y();
// QgsDebugMsg( QStringLiteral( "x, y is %1,%2" ).arg( x, 'f' ).arg( y, 'f' ) );
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
wkbPosition += sizeof( double );
}
}
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
QgsGeometry QgsOgcUtils::geometryFromGMLMultiPolygon( const QDomElement &geometryElement )
{
//first list: different polygons, second list: different rings, third list: different points
QgsMultiPolygonXY multiPolygonPoints;
QDomElement currentPolygonMemberElement;
QDomNodeList polygonList;
QDomElement currentPolygonElement;
// rings in GML2
QDomNodeList outerBoundaryList;
QDomElement currentOuterBoundaryElement;
QDomNodeList innerBoundaryList;
QDomElement currentInnerBoundaryElement;
// rings in GML3
QDomNodeList exteriorList;
QDomElement currentExteriorElement;
QDomElement currentInteriorElement;
QDomNodeList interiorList;
  // linear ring
QDomNodeList linearRingNodeList;
QDomElement currentLinearRingElement;
// Coordinates or position list
QDomNodeList currentCoordinateList;
QDomNodeList currentPosList;
QDomNodeList polygonMemberList = geometryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "polygonMember" ) );
QgsPolygonXY currentPolygonList;
for ( int i = 0; i < polygonMemberList.size(); ++i )
{
currentPolygonList.resize( 0 ); // preserve capacity - don't use clear
currentPolygonMemberElement = polygonMemberList.at( i ).toElement();
polygonList = currentPolygonMemberElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "Polygon" ) );
if ( polygonList.size() < 1 )
{
continue;
}
currentPolygonElement = polygonList.at( 0 ).toElement();
//find exterior ring
outerBoundaryList = currentPolygonElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "outerBoundaryIs" ) );
if ( !outerBoundaryList.isEmpty() )
{
currentOuterBoundaryElement = outerBoundaryList.at( 0 ).toElement();
QgsPolylineXY ringCoordinates;
linearRingNodeList = currentOuterBoundaryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LinearRing" ) );
if ( linearRingNodeList.size() < 1 )
{
continue;
}
currentLinearRingElement = linearRingNodeList.at( 0 ).toElement();
currentCoordinateList = currentLinearRingElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( currentCoordinateList.size() < 1 )
{
continue;
}
if ( readGMLCoordinates( ringCoordinates, currentCoordinateList.at( 0 ).toElement() ) != 0 )
{
continue;
}
currentPolygonList.push_back( ringCoordinates );
//find interior rings
QDomNodeList innerBoundaryList = currentPolygonElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "innerBoundaryIs" ) );
for ( int j = 0; j < innerBoundaryList.size(); ++j )
{
QgsPolylineXY ringCoordinates;
currentInnerBoundaryElement = innerBoundaryList.at( j ).toElement();
linearRingNodeList = currentInnerBoundaryElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LinearRing" ) );
if ( linearRingNodeList.size() < 1 )
{
continue;
}
currentLinearRingElement = linearRingNodeList.at( 0 ).toElement();
currentCoordinateList = currentLinearRingElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "coordinates" ) );
if ( currentCoordinateList.size() < 1 )
{
continue;
}
if ( readGMLCoordinates( ringCoordinates, currentCoordinateList.at( 0 ).toElement() ) != 0 )
{
continue;
}
currentPolygonList.push_back( ringCoordinates );
}
}
else
{
//find exterior ring
exteriorList = currentPolygonElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "exterior" ) );
if ( exteriorList.size() < 1 )
{
continue;
}
currentExteriorElement = exteriorList.at( 0 ).toElement();
QgsPolylineXY ringPositions;
linearRingNodeList = currentExteriorElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LinearRing" ) );
if ( linearRingNodeList.size() < 1 )
{
continue;
}
currentLinearRingElement = linearRingNodeList.at( 0 ).toElement();
currentPosList = currentLinearRingElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "posList" ) );
if ( currentPosList.size() < 1 )
{
continue;
}
if ( readGMLPositions( ringPositions, currentPosList.at( 0 ).toElement() ) != 0 )
{
continue;
}
currentPolygonList.push_back( ringPositions );
//find interior rings
QDomNodeList interiorList = currentPolygonElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "interior" ) );
for ( int j = 0; j < interiorList.size(); ++j )
{
QgsPolylineXY ringPositions;
currentInteriorElement = interiorList.at( j ).toElement();
linearRingNodeList = currentInteriorElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "LinearRing" ) );
if ( linearRingNodeList.size() < 1 )
{
continue;
}
currentLinearRingElement = linearRingNodeList.at( 0 ).toElement();
currentPosList = currentLinearRingElement.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "posList" ) );
if ( currentPosList.size() < 1 )
{
continue;
}
if ( readGMLPositions( ringPositions, currentPosList.at( 0 ).toElement() ) != 0 )
{
continue;
}
currentPolygonList.push_back( ringPositions );
}
}
multiPolygonPoints.push_back( currentPolygonList );
}
int nPolygons = multiPolygonPoints.size();
if ( nPolygons < 1 )
return QgsGeometry();
int size = 1 + 2 * sizeof( int );
//calculate the wkb size
for ( QgsMultiPolygonXY::const_iterator it = multiPolygonPoints.constBegin(); it != multiPolygonPoints.constEnd(); ++it )
{
size += 1 + 2 * sizeof( int );
for ( QgsPolygonXY::const_iterator iter = it->begin(); iter != it->end(); ++iter )
{
size += sizeof( int ) + 2 * iter->size() * sizeof( double );
}
}
QgsWkbTypes::Type type = QgsWkbTypes::MultiPolygon;
unsigned char *wkb = new unsigned char[size];
char e = htonl( 1 ) != 1;
int wkbPosition = 0; //current offset from wkb beginning (in bytes)
double x, y;
int nRings;
int nPointsInRing;
//fill the contents into *wkb
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
memcpy( &( wkb )[wkbPosition], &nPolygons, sizeof( int ) );
wkbPosition += sizeof( int );
type = QgsWkbTypes::Polygon;
for ( QgsMultiPolygonXY::const_iterator it = multiPolygonPoints.constBegin(); it != multiPolygonPoints.constEnd(); ++it )
{
memcpy( &( wkb )[wkbPosition], &e, 1 );
wkbPosition += 1;
memcpy( &( wkb )[wkbPosition], &type, sizeof( int ) );
wkbPosition += sizeof( int );
nRings = it->size();
memcpy( &( wkb )[wkbPosition], &nRings, sizeof( int ) );
wkbPosition += sizeof( int );
for ( QgsPolygonXY::const_iterator iter = it->begin(); iter != it->end(); ++iter )
{
nPointsInRing = iter->size();
memcpy( &( wkb )[wkbPosition], &nPointsInRing, sizeof( int ) );
wkbPosition += sizeof( int );
for ( QgsPolylineXY::const_iterator iterator = iter->begin(); iterator != iter->end(); ++iterator )
{
x = iterator->x();
y = iterator->y();
memcpy( &( wkb )[wkbPosition], &x, sizeof( double ) );
wkbPosition += sizeof( double );
memcpy( &( wkb )[wkbPosition], &y, sizeof( double ) );
wkbPosition += sizeof( double );
}
}
}
QgsGeometry g;
g.fromWkb( wkb, size );
return g;
}
bool QgsOgcUtils::readGMLCoordinates( QgsPolylineXY &coords, const QDomElement &elem )
{
QString coordSeparator = QStringLiteral( "," );
QString tupelSeparator = QStringLiteral( " " );
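  // With the default separators a <coordinates> payload looks like
  // "1.5,2.5 3.0,4.0" (example values only): comma between ordinates,
  // blank between coordinate tuples.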
//"decimal" has to be "."
coords.clear();
if ( elem.hasAttribute( QStringLiteral( "cs" ) ) )
{
coordSeparator = elem.attribute( QStringLiteral( "cs" ) );
}
if ( elem.hasAttribute( QStringLiteral( "ts" ) ) )
{
tupelSeparator = elem.attribute( QStringLiteral( "ts" ) );
}
QStringList tupels = elem.text().split( tupelSeparator, QString::SkipEmptyParts );
QStringList tuple_coords;
double x, y;
bool conversionSuccess;
QStringList::const_iterator it;
for ( it = tupels.constBegin(); it != tupels.constEnd(); ++it )
{
tuple_coords = ( *it ).split( coordSeparator, QString::SkipEmptyParts );
if ( tuple_coords.size() < 2 )
{
continue;
}
x = tuple_coords.at( 0 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
{
return true;
}
y = tuple_coords.at( 1 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
{
return true;
}
coords.push_back( QgsPointXY( x, y ) );
}
return false;
}
QgsRectangle QgsOgcUtils::rectangleFromGMLBox( const QDomNode &boxNode )
{
QgsRectangle rect;
QDomElement boxElem = boxNode.toElement();
if ( boxElem.tagName() != QLatin1String( "Box" ) )
return rect;
QDomElement bElem = boxElem.firstChild().toElement();
QString coordSeparator = QStringLiteral( "," );
QString tupelSeparator = QStringLiteral( " " );
if ( bElem.hasAttribute( QStringLiteral( "cs" ) ) )
{
coordSeparator = bElem.attribute( QStringLiteral( "cs" ) );
}
if ( bElem.hasAttribute( QStringLiteral( "ts" ) ) )
{
tupelSeparator = bElem.attribute( QStringLiteral( "ts" ) );
}
QString bString = bElem.text();
bool ok1, ok2, ok3, ok4;
double xmin = bString.section( tupelSeparator, 0, 0 ).section( coordSeparator, 0, 0 ).toDouble( &ok1 );
double ymin = bString.section( tupelSeparator, 0, 0 ).section( coordSeparator, 1, 1 ).toDouble( &ok2 );
double xmax = bString.section( tupelSeparator, 1, 1 ).section( coordSeparator, 0, 0 ).toDouble( &ok3 );
double ymax = bString.section( tupelSeparator, 1, 1 ).section( coordSeparator, 1, 1 ).toDouble( &ok4 );
if ( ok1 && ok2 && ok3 && ok4 )
{
rect = QgsRectangle( xmin, ymin, xmax, ymax );
rect.normalize();
}
return rect;
}
bool QgsOgcUtils::readGMLPositions( QgsPolylineXY &coords, const QDomElement &elem )
{
coords.clear();
QStringList pos = elem.text().split( ' ', QString::SkipEmptyParts );
double x, y;
bool conversionSuccess;
int posSize = pos.size();
int srsDimension = 2;
if ( elem.hasAttribute( QStringLiteral( "srsDimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "srsDimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
else if ( elem.hasAttribute( QStringLiteral( "dimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "dimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
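  // Illustrative example: with srsDimension="3" the text "1 2 100 3 4 200"
  // yields the 2D points (1, 2) and (3, 4); extra ordinates are skipped.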
for ( int i = 0; i < posSize / srsDimension; i++ )
{
x = pos.at( i * srsDimension ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
{
return true;
}
y = pos.at( i * srsDimension + 1 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
{
return true;
}
coords.push_back( QgsPointXY( x, y ) );
}
return false;
}
QgsRectangle QgsOgcUtils::rectangleFromGMLEnvelope( const QDomNode &envelopeNode )
{
QgsRectangle rect;
QDomElement envelopeElem = envelopeNode.toElement();
if ( envelopeElem.tagName() != QLatin1String( "Envelope" ) )
return rect;
QDomNodeList lowerCornerList = envelopeElem.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "lowerCorner" ) );
if ( lowerCornerList.size() < 1 )
return rect;
QDomNodeList upperCornerList = envelopeElem.elementsByTagNameNS( GML_NAMESPACE, QStringLiteral( "upperCorner" ) );
if ( upperCornerList.size() < 1 )
return rect;
bool conversionSuccess;
int srsDimension = 2;
QDomElement elem = lowerCornerList.at( 0 ).toElement();
if ( elem.hasAttribute( QStringLiteral( "srsDimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "srsDimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
else if ( elem.hasAttribute( QStringLiteral( "dimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "dimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
QString bString = elem.text();
double xmin = bString.section( ' ', 0, 0 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
return rect;
double ymin = bString.section( ' ', 1, 1 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
return rect;
elem = upperCornerList.at( 0 ).toElement();
if ( elem.hasAttribute( QStringLiteral( "srsDimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "srsDimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
else if ( elem.hasAttribute( QStringLiteral( "dimension" ) ) )
{
srsDimension = elem.attribute( QStringLiteral( "dimension" ) ).toInt( &conversionSuccess );
if ( !conversionSuccess )
{
srsDimension = 2;
}
}
Q_UNUSED( srsDimension )
bString = elem.text();
double xmax = bString.section( ' ', 0, 0 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
return rect;
double ymax = bString.section( ' ', 1, 1 ).toDouble( &conversionSuccess );
if ( !conversionSuccess )
return rect;
rect = QgsRectangle( xmin, ymin, xmax, ymax );
rect.normalize();
return rect;
}
QDomElement QgsOgcUtils::rectangleToGMLBox( QgsRectangle *box, QDomDocument &doc, int precision )
{
return rectangleToGMLBox( box, doc, QString(), false, precision );
}
QDomElement QgsOgcUtils::rectangleToGMLBox( QgsRectangle *box, QDomDocument &doc,
const QString &srsName,
bool invertAxisOrientation,
int precision )
{
if ( !box )
{
return QDomElement();
}
QDomElement boxElem = doc.createElement( QStringLiteral( "gml:Box" ) );
if ( !srsName.isEmpty() )
{
boxElem.setAttribute( QStringLiteral( "srsName" ), srsName );
}
QDomElement coordElem = doc.createElement( QStringLiteral( "gml:coordinates" ) );
coordElem.setAttribute( QStringLiteral( "cs" ), QStringLiteral( "," ) );
coordElem.setAttribute( QStringLiteral( "ts" ), QStringLiteral( " " ) );
QString coordString;
coordString += qgsDoubleToString( invertAxisOrientation ? box->yMinimum() : box->xMinimum(), precision );
coordString += ',';
coordString += qgsDoubleToString( invertAxisOrientation ? box->xMinimum() : box->yMinimum(), precision );
coordString += ' ';
coordString += qgsDoubleToString( invertAxisOrientation ? box->yMaximum() : box->xMaximum(), precision );
coordString += ',';
coordString += qgsDoubleToString( invertAxisOrientation ? box->xMaximum() : box->yMaximum(), precision );
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
boxElem.appendChild( coordElem );
return boxElem;
}
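// Illustrative output (no srsName, no axis inversion): a rectangle (10, 20, 30, 40)
// serialises as <gml:Box><gml:coordinates cs="," ts=" ">10,20 30,40</gml:coordinates></gml:Box>.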
QDomElement QgsOgcUtils::rectangleToGMLEnvelope( QgsRectangle *env, QDomDocument &doc, int precision )
{
return rectangleToGMLEnvelope( env, doc, QString(), false, precision );
}
QDomElement QgsOgcUtils::rectangleToGMLEnvelope( QgsRectangle *env, QDomDocument &doc,
const QString &srsName,
bool invertAxisOrientation,
int precision )
{
if ( !env )
{
return QDomElement();
}
QDomElement envElem = doc.createElement( QStringLiteral( "gml:Envelope" ) );
if ( !srsName.isEmpty() )
{
envElem.setAttribute( QStringLiteral( "srsName" ), srsName );
}
QString posList;
QDomElement lowerCornerElem = doc.createElement( QStringLiteral( "gml:lowerCorner" ) );
posList = qgsDoubleToString( invertAxisOrientation ? env->yMinimum() : env->xMinimum(), precision );
posList += ' ';
posList += qgsDoubleToString( invertAxisOrientation ? env->xMinimum() : env->yMinimum(), precision );
QDomText lowerCornerText = doc.createTextNode( posList );
lowerCornerElem.appendChild( lowerCornerText );
envElem.appendChild( lowerCornerElem );
QDomElement upperCornerElem = doc.createElement( QStringLiteral( "gml:upperCorner" ) );
posList = qgsDoubleToString( invertAxisOrientation ? env->yMaximum() : env->xMaximum(), precision );
posList += ' ';
posList += qgsDoubleToString( invertAxisOrientation ? env->xMaximum() : env->yMaximum(), precision );
QDomText upperCornerText = doc.createTextNode( posList );
upperCornerElem.appendChild( upperCornerText );
envElem.appendChild( upperCornerElem );
return envElem;
}
QDomElement QgsOgcUtils::geometryToGML( const QgsGeometry &geometry, QDomDocument &doc, const QString &format, int precision )
{
return geometryToGML( geometry, doc, ( format == QLatin1String( "GML2" ) ) ? GML_2_1_2 : GML_3_2_1, QString(), false, QString(), precision );
}
QDomElement QgsOgcUtils::geometryToGML( const QgsGeometry &geometry, QDomDocument &doc,
GMLVersion gmlVersion,
const QString &srsName,
bool invertAxisOrientation,
const QString &gmlIdBase,
int precision )
{
if ( geometry.isNull() )
return QDomElement();
// coordinate separator
QString cs = QStringLiteral( "," );
// tuple separator
QString ts = QStringLiteral( " " );
// coord element tagname
QDomElement baseCoordElem;
bool hasZValue = false;
QByteArray wkb( geometry.asWkb() );
QgsConstWkbPtr wkbPtr( wkb );
try
{
wkbPtr.readHeader();
}
catch ( const QgsWkbException &e )
{
Q_UNUSED( e )
// WKB exception while reading header
return QDomElement();
}
if ( gmlVersion != GML_2_1_2 )
{
switch ( geometry.wkbType() )
{
case QgsWkbTypes::Point25D:
case QgsWkbTypes::Point:
case QgsWkbTypes::MultiPoint25D:
case QgsWkbTypes::MultiPoint:
baseCoordElem = doc.createElement( QStringLiteral( "gml:pos" ) );
break;
default:
baseCoordElem = doc.createElement( QStringLiteral( "gml:posList" ) );<|fim▁hole|> }
baseCoordElem.setAttribute( QStringLiteral( "srsDimension" ), QStringLiteral( "2" ) );
cs = ' ';
}
else
{
baseCoordElem = doc.createElement( QStringLiteral( "gml:coordinates" ) );
baseCoordElem.setAttribute( QStringLiteral( "cs" ), cs );
baseCoordElem.setAttribute( QStringLiteral( "ts" ), ts );
}
try
{
switch ( geometry.wkbType() )
{
case QgsWkbTypes::Point25D:
case QgsWkbTypes::Point:
{
QDomElement pointElem = doc.createElement( QStringLiteral( "gml:Point" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
pointElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
pointElem.setAttribute( QStringLiteral( "srsName" ), srsName );
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
QDomText coordText = doc.createTextNode( qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision ) );
coordElem.appendChild( coordText );
pointElem.appendChild( coordElem );
return pointElem;
}
case QgsWkbTypes::MultiPoint25D:
hasZValue = true;
//intentional fall-through
FALLTHROUGH
case QgsWkbTypes::MultiPoint:
{
QDomElement multiPointElem = doc.createElement( QStringLiteral( "gml:MultiPoint" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
multiPointElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
multiPointElem.setAttribute( QStringLiteral( "srsName" ), srsName );
int nPoints;
wkbPtr >> nPoints;
for ( int idx = 0; idx < nPoints; ++idx )
{
QDomElement pointMemberElem = doc.createElement( QStringLiteral( "gml:pointMember" ) );
QDomElement pointElem = doc.createElement( QStringLiteral( "gml:Point" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
pointElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase + QStringLiteral( ".%1" ).arg( idx + 1 ) );
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
wkbPtr.readHeader();
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
QDomText coordText = doc.createTextNode( qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision ) );
coordElem.appendChild( coordText );
pointElem.appendChild( coordElem );
if ( hasZValue )
{
wkbPtr += sizeof( double );
}
pointMemberElem.appendChild( pointElem );
multiPointElem.appendChild( pointMemberElem );
}
return multiPointElem;
}
case QgsWkbTypes::LineString25D:
hasZValue = true;
//intentional fall-through
FALLTHROUGH
case QgsWkbTypes::LineString:
{
QDomElement lineStringElem = doc.createElement( QStringLiteral( "gml:LineString" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
lineStringElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
lineStringElem.setAttribute( QStringLiteral( "srsName" ), srsName );
// get number of points in the line
int nPoints;
wkbPtr >> nPoints;
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
QString coordString;
for ( int idx = 0; idx < nPoints; ++idx )
{
if ( idx != 0 )
{
coordString += ts;
}
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
coordString += qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision );
if ( hasZValue )
{
wkbPtr += sizeof( double );
}
}
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
lineStringElem.appendChild( coordElem );
return lineStringElem;
}
case QgsWkbTypes::MultiLineString25D:
hasZValue = true;
//intentional fall-through
FALLTHROUGH
case QgsWkbTypes::MultiLineString:
{
QDomElement multiLineStringElem = doc.createElement( QStringLiteral( "gml:MultiLineString" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
multiLineStringElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
multiLineStringElem.setAttribute( QStringLiteral( "srsName" ), srsName );
int nLines;
wkbPtr >> nLines;
for ( int jdx = 0; jdx < nLines; jdx++ )
{
QDomElement lineStringMemberElem = doc.createElement( QStringLiteral( "gml:lineStringMember" ) );
QDomElement lineStringElem = doc.createElement( QStringLiteral( "gml:LineString" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
lineStringElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase + QStringLiteral( ".%1" ).arg( jdx + 1 ) );
wkbPtr.readHeader();
int nPoints;
wkbPtr >> nPoints;
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
QString coordString;
for ( int idx = 0; idx < nPoints; idx++ )
{
if ( idx != 0 )
{
coordString += ts;
}
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
coordString += qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision );
if ( hasZValue )
{
wkbPtr += sizeof( double );
}
}
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
lineStringElem.appendChild( coordElem );
lineStringMemberElem.appendChild( lineStringElem );
multiLineStringElem.appendChild( lineStringMemberElem );
}
return multiLineStringElem;
}
case QgsWkbTypes::Polygon25D:
hasZValue = true;
//intentional fall-through
FALLTHROUGH
case QgsWkbTypes::Polygon:
{
QDomElement polygonElem = doc.createElement( QStringLiteral( "gml:Polygon" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
polygonElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
polygonElem.setAttribute( QStringLiteral( "srsName" ), srsName );
// get number of rings in the polygon
int numRings;
wkbPtr >> numRings;
if ( numRings == 0 ) // sanity check for zero rings in polygon
return QDomElement();
int *ringNumPoints = new int[numRings]; // number of points in each ring
for ( int idx = 0; idx < numRings; idx++ )
{
QString boundaryName = ( gmlVersion == GML_2_1_2 ) ? "gml:outerBoundaryIs" : "gml:exterior";
if ( idx != 0 )
{
boundaryName = ( gmlVersion == GML_2_1_2 ) ? "gml:innerBoundaryIs" : "gml:interior";
}
QDomElement boundaryElem = doc.createElement( boundaryName );
QDomElement ringElem = doc.createElement( QStringLiteral( "gml:LinearRing" ) );
// get number of points in the ring
int nPoints;
wkbPtr >> nPoints;
ringNumPoints[idx] = nPoints;
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
QString coordString;
for ( int jdx = 0; jdx < nPoints; jdx++ )
{
if ( jdx != 0 )
{
coordString += ts;
}
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
coordString += qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision );
if ( hasZValue )
{
wkbPtr += sizeof( double );
}
}
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
ringElem.appendChild( coordElem );
boundaryElem.appendChild( ringElem );
polygonElem.appendChild( boundaryElem );
}
delete [] ringNumPoints;
return polygonElem;
}
case QgsWkbTypes::MultiPolygon25D:
hasZValue = true;
//intentional fall-through
FALLTHROUGH
case QgsWkbTypes::MultiPolygon:
{
QDomElement multiPolygonElem = doc.createElement( QStringLiteral( "gml:MultiPolygon" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
multiPolygonElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase );
if ( !srsName.isEmpty() )
multiPolygonElem.setAttribute( QStringLiteral( "srsName" ), srsName );
int numPolygons;
wkbPtr >> numPolygons;
for ( int kdx = 0; kdx < numPolygons; kdx++ )
{
QDomElement polygonMemberElem = doc.createElement( QStringLiteral( "gml:polygonMember" ) );
QDomElement polygonElem = doc.createElement( QStringLiteral( "gml:Polygon" ) );
if ( gmlVersion == GML_3_2_1 && !gmlIdBase.isEmpty() )
polygonElem.setAttribute( QStringLiteral( "gml:id" ), gmlIdBase + QStringLiteral( ".%1" ).arg( kdx + 1 ) );
wkbPtr.readHeader();
int numRings;
wkbPtr >> numRings;
for ( int idx = 0; idx < numRings; idx++ )
{
QString boundaryName = ( gmlVersion == GML_2_1_2 ) ? "gml:outerBoundaryIs" : "gml:exterior";
if ( idx != 0 )
{
boundaryName = ( gmlVersion == GML_2_1_2 ) ? "gml:innerBoundaryIs" : "gml:interior";
}
QDomElement boundaryElem = doc.createElement( boundaryName );
QDomElement ringElem = doc.createElement( QStringLiteral( "gml:LinearRing" ) );
int nPoints;
wkbPtr >> nPoints;
QDomElement coordElem = baseCoordElem.cloneNode().toElement();
QString coordString;
for ( int jdx = 0; jdx < nPoints; jdx++ )
{
if ( jdx != 0 )
{
coordString += ts;
}
double x, y;
if ( invertAxisOrientation )
wkbPtr >> y >> x;
else
wkbPtr >> x >> y;
coordString += qgsDoubleToString( x, precision ) + cs + qgsDoubleToString( y, precision );
if ( hasZValue )
{
wkbPtr += sizeof( double );
}
}
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
ringElem.appendChild( coordElem );
boundaryElem.appendChild( ringElem );
polygonElem.appendChild( boundaryElem );
polygonMemberElem.appendChild( polygonElem );
multiPolygonElem.appendChild( polygonMemberElem );
}
}
return multiPolygonElem;
}
default:
return QDomElement();
}
}
catch ( const QgsWkbException &e )
{
Q_UNUSED( e )
return QDomElement();
}
}
QDomElement QgsOgcUtils::geometryToGML( const QgsGeometry &geometry, QDomDocument &doc, int precision )
{
return geometryToGML( geometry, doc, QStringLiteral( "GML2" ), precision );
}
QDomElement QgsOgcUtils::createGMLCoordinates( const QgsPolylineXY &points, QDomDocument &doc )
{
QDomElement coordElem = doc.createElement( QStringLiteral( "gml:coordinates" ) );
coordElem.setAttribute( QStringLiteral( "cs" ), QStringLiteral( "," ) );
coordElem.setAttribute( QStringLiteral( "ts" ), QStringLiteral( " " ) );
QString coordString;
QVector<QgsPointXY>::const_iterator pointIt = points.constBegin();
for ( ; pointIt != points.constEnd(); ++pointIt )
{
if ( pointIt != points.constBegin() )
{
coordString += ' ';
}
coordString += qgsDoubleToString( pointIt->x() );
coordString += ',';
coordString += qgsDoubleToString( pointIt->y() );
}
QDomText coordText = doc.createTextNode( coordString );
coordElem.appendChild( coordText );
return coordElem;
}
QDomElement QgsOgcUtils::createGMLPositions( const QgsPolylineXY &points, QDomDocument &doc )
{
QDomElement posElem = doc.createElement( QStringLiteral( "gml:pos" ) );
if ( points.size() > 1 )
posElem = doc.createElement( QStringLiteral( "gml:posList" ) );
posElem.setAttribute( QStringLiteral( "srsDimension" ), QStringLiteral( "2" ) );
QString coordString;
QVector<QgsPointXY>::const_iterator pointIt = points.constBegin();
for ( ; pointIt != points.constEnd(); ++pointIt )
{
if ( pointIt != points.constBegin() )
{
coordString += ' ';
}
coordString += qgsDoubleToString( pointIt->x() );
coordString += ' ';
coordString += qgsDoubleToString( pointIt->y() );
}
QDomText coordText = doc.createTextNode( coordString );
posElem.appendChild( coordText );
return posElem;
}
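// Illustrative output: for the two points (1, 2) and (3, 4) the function above returns
// <gml:posList srsDimension="2">1 2 3 4</gml:posList>; a single point is emitted as
// <gml:pos> instead.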
// -----------------------------------------
QColor QgsOgcUtils::colorFromOgcFill( const QDomElement &fillElement )
{
if ( fillElement.isNull() || !fillElement.hasChildNodes() )
{
return QColor();
}
QString cssName;
QString elemText;
QColor color;
QDomElement cssElem = fillElement.firstChildElement( QStringLiteral( "CssParameter" ) );
while ( !cssElem.isNull() )
{
cssName = cssElem.attribute( QStringLiteral( "name" ), QStringLiteral( "not_found" ) );
if ( cssName != QLatin1String( "not_found" ) )
{
elemText = cssElem.text();
if ( cssName == QLatin1String( "fill" ) )
{
color.setNamedColor( elemText );
}
else if ( cssName == QLatin1String( "fill-opacity" ) )
{
bool ok;
double opacity = elemText.toDouble( &ok );
if ( ok )
{
color.setAlphaF( opacity );
}
}
}
cssElem = cssElem.nextSiblingElement( QStringLiteral( "CssParameter" ) );
}
return color;
}
QgsExpression *QgsOgcUtils::expressionFromOgcFilter( const QDomElement &element, QgsVectorLayer *layer )
{
return expressionFromOgcFilter( element, QgsOgcUtils::FILTER_OGC_1_0, layer );
}
QgsExpression *QgsOgcUtils::expressionFromOgcFilter( const QDomElement &element, const FilterVersion version, QgsVectorLayer *layer )
{
if ( element.isNull() || !element.hasChildNodes() )
return nullptr;
QgsExpression *expr = new QgsExpression();
// check if it is a single string value not having DOM elements
// that express OGC operators
if ( element.firstChild().nodeType() == QDomNode::TextNode )
{
expr->setExpression( element.firstChild().nodeValue() );
return expr;
}
QgsOgcUtilsExpressionFromFilter utils( version, layer );
// then check OGC DOM elements that contain OGC tags specifying
// OGC operators.
QDomElement childElem = element.firstChildElement();
while ( !childElem.isNull() )
{
QgsExpressionNode *node = utils.nodeFromOgcFilter( childElem );
if ( !node )
{
// invalid expression, parser error
expr->d->mParserErrorString = utils.errorMessage();
return expr;
}
// use the concat binary operator to append to the root node
if ( !expr->d->mRootNode )
{
expr->d->mRootNode = node;
}
else
{
expr->d->mRootNode = new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boConcat, expr->d->mRootNode, node );
}
childElem = childElem.nextSiblingElement();
}
// update expression string
expr->d->mExp = expr->dump();
return expr;
}
static const QMap<QString, int> BINARY_OPERATORS_TAG_NAMES_MAP
{
// logical
{ QStringLiteral( "Or" ), QgsExpressionNodeBinaryOperator::boOr },
{ QStringLiteral( "And" ), QgsExpressionNodeBinaryOperator::boAnd },
// comparison
{ QStringLiteral( "PropertyIsEqualTo" ), QgsExpressionNodeBinaryOperator::boEQ },
{ QStringLiteral( "PropertyIsNotEqualTo" ), QgsExpressionNodeBinaryOperator::boNE },
{ QStringLiteral( "PropertyIsLessThanOrEqualTo" ), QgsExpressionNodeBinaryOperator::boLE },
{ QStringLiteral( "PropertyIsGreaterThanOrEqualTo" ), QgsExpressionNodeBinaryOperator::boGE },
{ QStringLiteral( "PropertyIsLessThan" ), QgsExpressionNodeBinaryOperator::boLT },
{ QStringLiteral( "PropertyIsGreaterThan" ), QgsExpressionNodeBinaryOperator::boGT },
{ QStringLiteral( "PropertyIsLike" ), QgsExpressionNodeBinaryOperator::boLike },
// arithmetic
{ QStringLiteral( "Add" ), QgsExpressionNodeBinaryOperator::boPlus },
{ QStringLiteral( "Sub" ), QgsExpressionNodeBinaryOperator::boMinus },
{ QStringLiteral( "Mul" ), QgsExpressionNodeBinaryOperator::boMul },
{ QStringLiteral( "Div" ), QgsExpressionNodeBinaryOperator::boDiv },
};
static int binaryOperatorFromTagName( const QString &tagName )
{
return BINARY_OPERATORS_TAG_NAMES_MAP.value( tagName, -1 );
}
static QString binaryOperatorToTagName( QgsExpressionNodeBinaryOperator::BinaryOperator op )
{
if ( op == QgsExpressionNodeBinaryOperator::boILike )
{
return QStringLiteral( "PropertyIsLike" );
}
return BINARY_OPERATORS_TAG_NAMES_MAP.key( op, QString() );
}
static bool isBinaryOperator( const QString &tagName )
{
return binaryOperatorFromTagName( tagName ) >= 0;
}
static bool isSpatialOperator( const QString &tagName )
{
static QStringList spatialOps;
if ( spatialOps.isEmpty() )
{
spatialOps << QStringLiteral( "BBOX" ) << QStringLiteral( "Intersects" ) << QStringLiteral( "Contains" ) << QStringLiteral( "Crosses" ) << QStringLiteral( "Equals" )
<< QStringLiteral( "Disjoint" ) << QStringLiteral( "Overlaps" ) << QStringLiteral( "Touches" ) << QStringLiteral( "Within" );
}
return spatialOps.contains( tagName );
}
QgsExpressionNode *QgsOgcUtils::nodeFromOgcFilter( QDomElement &element, QString &errorMessage, QgsVectorLayer *layer )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0, layer );
QgsExpressionNode *node = utils.nodeFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeBinaryOperator *QgsOgcUtils::nodeBinaryOperatorFromOgcFilter( QDomElement &element, QString &errorMessage, QgsVectorLayer *layer )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0, layer );
QgsExpressionNodeBinaryOperator *node = utils.nodeBinaryOperatorFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeFunction *QgsOgcUtils::nodeSpatialOperatorFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNodeFunction *node = utils.nodeSpatialOperatorFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeUnaryOperator *QgsOgcUtils::nodeNotFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNodeUnaryOperator *node = utils.nodeNotFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeFunction *QgsOgcUtils::nodeFunctionFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNodeFunction *node = utils.nodeFunctionFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNode *QgsOgcUtils::nodeLiteralFromOgcFilter( QDomElement &element, QString &errorMessage, QgsVectorLayer *layer )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0, layer );
QgsExpressionNode *node = utils.nodeLiteralFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeColumnRef *QgsOgcUtils::nodeColumnRefFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNodeColumnRef *node = utils.nodeColumnRefFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNode *QgsOgcUtils::nodeIsBetweenFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNode *node = utils.nodeIsBetweenFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
QgsExpressionNodeBinaryOperator *QgsOgcUtils::nodePropertyIsNullFromOgcFilter( QDomElement &element, QString &errorMessage )
{
QgsOgcUtilsExpressionFromFilter utils( QgsOgcUtils::FILTER_OGC_1_0 );
QgsExpressionNodeBinaryOperator *node = utils.nodePropertyIsNullFromOgcFilter( element );
errorMessage = utils.errorMessage();
return node;
}
/////////////////
QDomElement QgsOgcUtils::expressionToOgcFilter( const QgsExpression &exp, QDomDocument &doc, QString *errorMessage )
{
return expressionToOgcFilter( exp, doc, GML_2_1_2, FILTER_OGC_1_0,
QStringLiteral( "geometry" ), QString(), false, false, errorMessage );
}
QDomElement QgsOgcUtils::expressionToOgcExpression( const QgsExpression &exp, QDomDocument &doc, QString *errorMessage )
{
return expressionToOgcExpression( exp, doc, GML_2_1_2, FILTER_OGC_1_0,
QStringLiteral( "geometry" ), QString(), false, false, errorMessage );
}
QDomElement QgsOgcUtils::expressionToOgcFilter( const QgsExpression &expression,
QDomDocument &doc,
GMLVersion gmlVersion,
FilterVersion filterVersion,
const QString &geometryName,
const QString &srsName,
bool honourAxisOrientation,
bool invertAxisOrientation,
QString *errorMessage )
{
if ( !expression.rootNode() )
return QDomElement();
QgsExpression exp = expression;
QgsExpressionContext context;
context << QgsExpressionContextUtils::globalScope();
QgsOgcUtilsExprToFilter utils( doc, gmlVersion, filterVersion, geometryName, srsName, honourAxisOrientation, invertAxisOrientation );
QDomElement exprRootElem = utils.expressionNodeToOgcFilter( exp.rootNode(), &exp, &context );
if ( errorMessage )
*errorMessage = utils.errorMessage();
if ( exprRootElem.isNull() )
return QDomElement();
QDomElement filterElem =
( filterVersion == FILTER_FES_2_0 ) ?
doc.createElementNS( FES_NAMESPACE, QStringLiteral( "fes:Filter" ) ) :
doc.createElementNS( OGC_NAMESPACE, QStringLiteral( "ogc:Filter" ) );
if ( utils.GMLNamespaceUsed() )
{
QDomAttr attr = doc.createAttribute( QStringLiteral( "xmlns:gml" ) );
if ( gmlVersion == GML_3_2_1 )
attr.setValue( GML32_NAMESPACE );
else
attr.setValue( GML_NAMESPACE );
filterElem.setAttributeNode( attr );
}
filterElem.appendChild( exprRootElem );
return filterElem;
}
QDomElement QgsOgcUtils::expressionToOgcExpression( const QgsExpression &expression,
QDomDocument &doc,
GMLVersion gmlVersion,
FilterVersion filterVersion,
const QString &geometryName,
const QString &srsName,
bool honourAxisOrientation,
bool invertAxisOrientation,
QString *errorMessage )
{
QgsExpressionContext context;
context << QgsExpressionContextUtils::globalScope();
QgsExpression exp = expression;
const QgsExpressionNode *node = exp.rootNode();
if ( !node )
return QDomElement();
switch ( node->nodeType() )
{
case QgsExpressionNode::ntFunction:
case QgsExpressionNode::ntLiteral:
case QgsExpressionNode::ntColumnRef:
{
QgsOgcUtilsExprToFilter utils( doc, gmlVersion, filterVersion, geometryName, srsName, honourAxisOrientation, invertAxisOrientation );
QDomElement exprRootElem = utils.expressionNodeToOgcFilter( node, &exp, &context );
if ( errorMessage )
*errorMessage = utils.errorMessage();
if ( !exprRootElem.isNull() )
{
return exprRootElem;
}
break;
}
default:
{
if ( errorMessage )
*errorMessage = QObject::tr( "Node type not supported in expression translation: %1" ).arg( node->nodeType() );
}
}
// got an error
return QDomElement();
}
QDomElement QgsOgcUtils::SQLStatementToOgcFilter( const QgsSQLStatement &statement,
QDomDocument &doc,
GMLVersion gmlVersion,
FilterVersion filterVersion,
const QList<LayerProperties> &layerProperties,
bool honourAxisOrientation,
bool invertAxisOrientation,
const QMap< QString, QString> &mapUnprefixedTypenameToPrefixedTypename,
QString *errorMessage )
{
if ( !statement.rootNode() )
return QDomElement();
QgsOgcUtilsSQLStatementToFilter utils( doc, gmlVersion, filterVersion,
layerProperties, honourAxisOrientation, invertAxisOrientation,
mapUnprefixedTypenameToPrefixedTypename );
QDomElement exprRootElem = utils.toOgcFilter( statement.rootNode() );
if ( errorMessage )
*errorMessage = utils.errorMessage();
if ( exprRootElem.isNull() )
return QDomElement();
QDomElement filterElem =
( filterVersion == FILTER_FES_2_0 ) ?
doc.createElementNS( FES_NAMESPACE, QStringLiteral( "fes:Filter" ) ) :
doc.createElementNS( OGC_NAMESPACE, QStringLiteral( "ogc:Filter" ) );
if ( utils.GMLNamespaceUsed() )
{
QDomAttr attr = doc.createAttribute( QStringLiteral( "xmlns:gml" ) );
if ( gmlVersion == GML_3_2_1 )
attr.setValue( GML32_NAMESPACE );
else
attr.setValue( GML_NAMESPACE );
filterElem.setAttributeNode( attr );
}
filterElem.appendChild( exprRootElem );
return filterElem;
}
//
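// Recursive dispatcher: routes each expression node type to the matching *ToOgcFilter()
// helper; unsupported node types set mErrorMessage and return a null element, which the
// callers propagate upwards.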
QDomElement QgsOgcUtilsExprToFilter::expressionNodeToOgcFilter( const QgsExpressionNode *node, QgsExpression *expression, const QgsExpressionContext *context )
{
switch ( node->nodeType() )
{
case QgsExpressionNode::ntUnaryOperator:
return expressionUnaryOperatorToOgcFilter( static_cast<const QgsExpressionNodeUnaryOperator *>( node ), expression, context );
case QgsExpressionNode::ntBinaryOperator:
return expressionBinaryOperatorToOgcFilter( static_cast<const QgsExpressionNodeBinaryOperator *>( node ), expression, context );
case QgsExpressionNode::ntInOperator:
return expressionInOperatorToOgcFilter( static_cast<const QgsExpressionNodeInOperator *>( node ), expression, context );
case QgsExpressionNode::ntFunction:
return expressionFunctionToOgcFilter( static_cast<const QgsExpressionNodeFunction *>( node ), expression, context );
case QgsExpressionNode::ntLiteral:
return expressionLiteralToOgcFilter( static_cast<const QgsExpressionNodeLiteral *>( node ), expression, context );
case QgsExpressionNode::ntColumnRef:
return expressionColumnRefToOgcFilter( static_cast<const QgsExpressionNodeColumnRef *>( node ), expression, context );
default:
mErrorMessage = QObject::tr( "Node type not supported: %1" ).arg( node->nodeType() );
return QDomElement();
}
}
QDomElement QgsOgcUtilsExprToFilter::expressionUnaryOperatorToOgcFilter( const QgsExpressionNodeUnaryOperator *node, QgsExpression *expression, const QgsExpressionContext *context )
{
QDomElement operandElem = expressionNodeToOgcFilter( node->operand(), expression, context );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QDomElement uoElem;
switch ( node->op() )
{
case QgsExpressionNodeUnaryOperator::uoMinus:
uoElem = mDoc.createElement( mFilterPrefix + ":Literal" );
if ( node->operand()->nodeType() == QgsExpressionNode::ntLiteral )
{
// operand expression already created a Literal node:
// take the literal value, prepend - and remove old literal node
uoElem.appendChild( mDoc.createTextNode( "-" + operandElem.text() ) );
mDoc.removeChild( operandElem );
}
else
{
mErrorMessage = QObject::tr( "This use of unary operator not implemented yet" );
return QDomElement();
}
break;
case QgsExpressionNodeUnaryOperator::uoNot:
uoElem = mDoc.createElement( mFilterPrefix + ":Not" );
uoElem.appendChild( operandElem );
break;
default:
mErrorMessage = QObject::tr( "Unary operator '%1' not implemented yet" ).arg( node->text() );
return QDomElement();
}
return uoElem;
}
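// Binary operators: "x IS NULL" / "x IS NOT NULL" are mapped to <PropertyIsNull> (optionally
// wrapped in <Not>) before the right operand is translated; LIKE/ILIKE additionally get the
// wildCard/singleChar/escape attributes so the receiving server interprets '%' and '_' the
// same way QGIS does.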
QDomElement QgsOgcUtilsExprToFilter::expressionBinaryOperatorToOgcFilter( const QgsExpressionNodeBinaryOperator *node, QgsExpression *expression, const QgsExpressionContext *context )
{
QDomElement leftElem = expressionNodeToOgcFilter( node->opLeft(), expression, context );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QgsExpressionNodeBinaryOperator::BinaryOperator op = node->op();
// before right operator is parsed: to allow NULL handling
if ( op == QgsExpressionNodeBinaryOperator::boIs || op == QgsExpressionNodeBinaryOperator::boIsNot )
{
if ( node->opRight()->nodeType() == QgsExpressionNode::ntLiteral )
{
const QgsExpressionNodeLiteral *rightLit = static_cast<const QgsExpressionNodeLiteral *>( node->opRight() );
if ( rightLit->value().isNull() )
{
QDomElement elem = mDoc.createElement( mFilterPrefix + ":PropertyIsNull" );
elem.appendChild( leftElem );
if ( op == QgsExpressionNodeBinaryOperator::boIsNot )
{
QDomElement notElem = mDoc.createElement( mFilterPrefix + ":Not" );
notElem.appendChild( elem );
return notElem;
}
return elem;
}
// continue with equal / not equal operator once the null case is handled
op = ( op == QgsExpressionNodeBinaryOperator::boIs ? QgsExpressionNodeBinaryOperator::boEQ : QgsExpressionNodeBinaryOperator::boNE );
}
}
QDomElement rightElem = expressionNodeToOgcFilter( node->opRight(), expression, context );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QString opText = binaryOperatorToTagName( op );
if ( opText.isEmpty() )
{
// not implemented binary operators
// TODO: regex, % (mod), ^ (pow) are not supported yet
mErrorMessage = QObject::tr( "Binary operator %1 not implemented yet" ).arg( node->text() );
return QDomElement();
}
QDomElement boElem = mDoc.createElement( mFilterPrefix + ":" + opText );
if ( op == QgsExpressionNodeBinaryOperator::boLike || op == QgsExpressionNodeBinaryOperator::boILike )
{
if ( op == QgsExpressionNodeBinaryOperator::boILike )
boElem.setAttribute( QStringLiteral( "matchCase" ), QStringLiteral( "false" ) );
// setup wildCards to <ogc:PropertyIsLike>
boElem.setAttribute( QStringLiteral( "wildCard" ), QStringLiteral( "%" ) );
boElem.setAttribute( QStringLiteral( "singleChar" ), QStringLiteral( "_" ) );
if ( mFilterVersion == QgsOgcUtils::FILTER_OGC_1_0 )
boElem.setAttribute( QStringLiteral( "escape" ), QStringLiteral( "\\" ) );
else
boElem.setAttribute( QStringLiteral( "escapeChar" ), QStringLiteral( "\\" ) );
}
boElem.appendChild( leftElem );
boElem.appendChild( rightElem );
return boElem;
}
QDomElement QgsOgcUtilsExprToFilter::expressionLiteralToOgcFilter( const QgsExpressionNodeLiteral *node, QgsExpression *expression, const QgsExpressionContext *context )
{
Q_UNUSED( expression )
Q_UNUSED( context )
QString value;
switch ( node->value().type() )
{
case QVariant::Int:
value = QString::number( node->value().toInt() );
break;
case QVariant::Double:
value = qgsDoubleToString( node->value().toDouble() );
break;
case QVariant::String:
value = node->value().toString();
break;
case QVariant::Date:
value = node->value().toDate().toString( Qt::ISODate );
break;
case QVariant::DateTime:
value = node->value().toDateTime().toString( Qt::ISODate );
break;
default:
mErrorMessage = QObject::tr( "Literal type not supported: %1" ).arg( node->value().type() );
return QDomElement();
}
QDomElement litElem = mDoc.createElement( mFilterPrefix + ":Literal" );
litElem.appendChild( mDoc.createTextNode( value ) );
return litElem;
}
QDomElement QgsOgcUtilsExprToFilter::expressionColumnRefToOgcFilter( const QgsExpressionNodeColumnRef *node, QgsExpression *expression, const QgsExpressionContext *context )
{
Q_UNUSED( expression )
Q_UNUSED( context )
QDomElement propElem = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
propElem.appendChild( mDoc.createTextNode( node->name() ) );
return propElem;
}
QDomElement QgsOgcUtilsExprToFilter::expressionInOperatorToOgcFilter( const QgsExpressionNodeInOperator *node, QgsExpression *expression, const QgsExpressionContext *context )
{
if ( node->list()->list().size() == 1 )
return expressionNodeToOgcFilter( node->list()->list()[0], expression, context );
QDomElement orElem = mDoc.createElement( mFilterPrefix + ":Or" );
QDomElement leftNode = expressionNodeToOgcFilter( node->node(), expression, context );
const auto constList = node->list()->list();
for ( QgsExpressionNode *n : constList )
{
QDomElement listNode = expressionNodeToOgcFilter( n, expression, context );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QDomElement eqElem = mDoc.createElement( mFilterPrefix + ":PropertyIsEqualTo" );
eqElem.appendChild( leftNode.cloneNode() );
eqElem.appendChild( listNode );
orElem.appendChild( eqElem );
}
if ( node->isNotIn() )
{
QDomElement notElem = mDoc.createElement( mFilterPrefix + ":Not" );
notElem.appendChild( orElem );
return notElem;
}
return orElem;
}
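// Spatial helpers: map the QGIS geometry predicate function names to their OGC element names,
// and recognise constant geometry arguments (currently only geom_from_wkt with a string literal).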
static const QMap<QString, QString> BINARY_SPATIAL_OPS_MAP
{
{ QStringLiteral( "disjoint" ), QStringLiteral( "Disjoint" ) },
{ QStringLiteral( "intersects" ), QStringLiteral( "Intersects" )},
{ QStringLiteral( "touches" ), QStringLiteral( "Touches" ) },
{ QStringLiteral( "crosses" ), QStringLiteral( "Crosses" ) },
{ QStringLiteral( "contains" ), QStringLiteral( "Contains" ) },
{ QStringLiteral( "overlaps" ), QStringLiteral( "Overlaps" ) },
{ QStringLiteral( "within" ), QStringLiteral( "Within" ) }
};
static bool isBinarySpatialOperator( const QString &fnName )
{
return BINARY_SPATIAL_OPS_MAP.contains( fnName );
}
static QString tagNameForSpatialOperator( const QString &fnName )
{
return BINARY_SPATIAL_OPS_MAP.value( fnName );
}
static bool isGeometryColumn( const QgsExpressionNode *node )
{
if ( node->nodeType() != QgsExpressionNode::ntFunction )
return false;
const QgsExpressionNodeFunction *fn = static_cast<const QgsExpressionNodeFunction *>( node );
QgsExpressionFunction *fd = QgsExpression::Functions()[fn->fnIndex()];
return fd->name() == QLatin1String( "$geometry" );
}
static QgsGeometry geometryFromConstExpr( const QgsExpressionNode *node )
{
// Right now we support only geomFromWKT(' ..... ')
// Ideally we should support any constant sub-expression (not dependent on feature's geometry or attributes)
if ( node->nodeType() == QgsExpressionNode::ntFunction )
{
const QgsExpressionNodeFunction *fnNode = static_cast<const QgsExpressionNodeFunction *>( node );
QgsExpressionFunction *fnDef = QgsExpression::Functions()[fnNode->fnIndex()];
if ( fnDef->name() == QLatin1String( "geom_from_wkt" ) )
{
const QList<QgsExpressionNode *> &args = fnNode->args()->list();
if ( args[0]->nodeType() == QgsExpressionNode::ntLiteral )
{
QString wkt = static_cast<const QgsExpressionNodeLiteral *>( args[0] )->value().toString();
return QgsGeometry::fromWkt( wkt );
}
}
}
return QgsGeometry();
}
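// Function nodes: intersects_bbox($geometry, geom_from_wkt('…')) becomes <BBOX>, the other
// binary spatial predicates become their OGC counterpart with a GML geometry built from
// geom_from_wkt() / geom_from_gml(), static sub-expressions are evaluated and emitted as
// literals, and anything else falls back to a generic <Function name="…"> element.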
QDomElement QgsOgcUtilsExprToFilter::expressionFunctionToOgcFilter( const QgsExpressionNodeFunction *node, QgsExpression *expression, const QgsExpressionContext *context )
{
QgsExpressionFunction *fd = QgsExpression::Functions()[node->fnIndex()];
if ( fd->name() == QLatin1String( "intersects_bbox" ) )
{
QList<QgsExpressionNode *> argNodes = node->args()->list();
Q_ASSERT( argNodes.count() == 2 ); // binary spatial ops must have two args
QgsGeometry geom = geometryFromConstExpr( argNodes[1] );
if ( !geom.isNull() && isGeometryColumn( argNodes[0] ) )
{
QgsRectangle rect = geom.boundingBox();
mGMLUsed = true;
QDomElement elemBox = ( mGMLVersion == QgsOgcUtils::GML_2_1_2 ) ?
QgsOgcUtils::rectangleToGMLBox( &rect, mDoc, mSrsName, mInvertAxisOrientation ) :
QgsOgcUtils::rectangleToGMLEnvelope( &rect, mDoc, mSrsName, mInvertAxisOrientation );
QDomElement geomProperty = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
geomProperty.appendChild( mDoc.createTextNode( mGeometryName ) );
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":BBOX" );
funcElem.appendChild( geomProperty );
funcElem.appendChild( elemBox );
return funcElem;
}
else
{
mErrorMessage = QObject::tr( "<BBOX> is currently supported only in form: bbox($geometry, geomFromWKT('…'))" );
return QDomElement();
}
}
if ( isBinarySpatialOperator( fd->name() ) )
{
QList<QgsExpressionNode *> argNodes = node->args()->list();
Q_ASSERT( argNodes.count() == 2 ); // binary spatial ops must have two args
QgsExpressionNode *otherNode = nullptr;
if ( isGeometryColumn( argNodes[0] ) )
otherNode = argNodes[1];
else if ( isGeometryColumn( argNodes[1] ) )
otherNode = argNodes[0];
else
{
mErrorMessage = QObject::tr( "Unable to translate spatial operator: at least one must refer to geometry." );
return QDomElement();
}
QDomElement otherGeomElem;
// the other node must be a geometry constructor
if ( otherNode->nodeType() != QgsExpressionNode::ntFunction )
{
mErrorMessage = QObject::tr( "spatial operator: the other operator must be a geometry constructor function" );
return QDomElement();
}
const QgsExpressionNodeFunction *otherFn = static_cast<const QgsExpressionNodeFunction *>( otherNode );
QgsExpressionFunction *otherFnDef = QgsExpression::Functions()[otherFn->fnIndex()];
if ( otherFnDef->name() == QLatin1String( "geom_from_wkt" ) )
{
QgsExpressionNode *firstFnArg = otherFn->args()->list()[0];
if ( firstFnArg->nodeType() != QgsExpressionNode::ntLiteral )
{
mErrorMessage = QObject::tr( "geom_from_wkt: argument must be string literal" );
return QDomElement();
}
QString wkt = static_cast<const QgsExpressionNodeLiteral *>( firstFnArg )->value().toString();
QgsGeometry geom = QgsGeometry::fromWkt( wkt );
otherGeomElem = QgsOgcUtils::geometryToGML( geom, mDoc, mGMLVersion, mSrsName, mInvertAxisOrientation,
QStringLiteral( "qgis_id_geom_%1" ).arg( mGeomId ) );
mGeomId ++;
}
else if ( otherFnDef->name() == QLatin1String( "geom_from_gml" ) )
{
QgsExpressionNode *firstFnArg = otherFn->args()->list()[0];
if ( firstFnArg->nodeType() != QgsExpressionNode::ntLiteral )
{
mErrorMessage = QObject::tr( "geom_from_gml: argument must be string literal" );
return QDomElement();
}
QDomDocument geomDoc;
QString gml = static_cast<const QgsExpressionNodeLiteral *>( firstFnArg )->value().toString();
if ( !geomDoc.setContent( gml, true ) )
{
mErrorMessage = QObject::tr( "geom_from_gml: unable to parse XML" );
return QDomElement();
}
QDomNode geomNode = mDoc.importNode( geomDoc.documentElement(), true );
otherGeomElem = geomNode.toElement();
}
else
{
mErrorMessage = QObject::tr( "spatial operator: unknown geometry constructor function" );
return QDomElement();
}
mGMLUsed = true;
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":" + tagNameForSpatialOperator( fd->name() ) );
QDomElement geomProperty = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
geomProperty.appendChild( mDoc.createTextNode( mGeometryName ) );
funcElem.appendChild( geomProperty );
funcElem.appendChild( otherGeomElem );
return funcElem;
}
if ( fd->isStatic( node, expression, context ) )
{
QVariant result = fd->run( node->args(), context, expression, node );
QgsExpressionNodeLiteral literal( result );
return expressionLiteralToOgcFilter( &literal, expression, context );
}
if ( fd->params() == 0 )
{
mErrorMessage = QObject::tr( "Special columns/constants are not supported." );
return QDomElement();
}
  // This is somewhat fragile - we are just hoping that the server supports the same function names as QGIS does...
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":Function" );
funcElem.setAttribute( QStringLiteral( "name" ), fd->name() );
const auto constList = node->args()->list();
for ( QgsExpressionNode *n : constList )
{
QDomElement childElem = expressionNodeToOgcFilter( n, expression, context );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
funcElem.appendChild( childElem );
}
return funcElem;
}
//
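// SQL (WFS SELECT) -> OGC Filter translation. Unlike the expression path above, this variant
// knows about multiple layers/typenames, table aliases and joins, and resolves per-layer SRS
// names for spatial functions.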
QgsOgcUtilsSQLStatementToFilter::QgsOgcUtilsSQLStatementToFilter( QDomDocument &doc,
QgsOgcUtils::GMLVersion gmlVersion,
QgsOgcUtils::FilterVersion filterVersion,
const QList<QgsOgcUtils::LayerProperties> &layerProperties,
bool honourAxisOrientation,
bool invertAxisOrientation,
const QMap< QString, QString> &mapUnprefixedTypenameToPrefixedTypename )
: mDoc( doc )
, mGMLUsed( false )
, mGMLVersion( gmlVersion )
, mFilterVersion( filterVersion )
, mLayerProperties( layerProperties )
, mHonourAxisOrientation( honourAxisOrientation )
, mInvertAxisOrientation( invertAxisOrientation )
, mFilterPrefix( ( filterVersion == QgsOgcUtils::FILTER_FES_2_0 ) ? "fes" : "ogc" )
, mPropertyName( ( filterVersion == QgsOgcUtils::FILTER_FES_2_0 ) ? "ValueReference" : "PropertyName" )
, mGeomId( 1 )
, mMapUnprefixedTypenameToPrefixedTypename( mapUnprefixedTypenameToPrefixedTypename )
{
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::Node *node )
{
switch ( node->nodeType() )
{
case QgsSQLStatement::ntUnaryOperator:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeUnaryOperator *>( node ) );
case QgsSQLStatement::ntBinaryOperator:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeBinaryOperator *>( node ) );
case QgsSQLStatement::ntInOperator:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeInOperator *>( node ) );
case QgsSQLStatement::ntBetweenOperator:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeBetweenOperator *>( node ) );
case QgsSQLStatement::ntFunction:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeFunction *>( node ) );
case QgsSQLStatement::ntLiteral:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeLiteral *>( node ) );
case QgsSQLStatement::ntColumnRef:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeColumnRef *>( node ) );
case QgsSQLStatement::ntSelect:
return toOgcFilter( static_cast<const QgsSQLStatement::NodeSelect *>( node ) );
default:
mErrorMessage = QObject::tr( "Node type not supported: %1" ).arg( node->nodeType() );
return QDomElement();
}
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeUnaryOperator *node )
{
QDomElement operandElem = toOgcFilter( node->operand() );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QDomElement uoElem;
switch ( node->op() )
{
case QgsSQLStatement::uoMinus:
uoElem = mDoc.createElement( mFilterPrefix + ":Literal" );
if ( node->operand()->nodeType() == QgsSQLStatement::ntLiteral )
{
// operand expression already created a Literal node:
// take the literal value, prepend - and remove old literal node
uoElem.appendChild( mDoc.createTextNode( "-" + operandElem.text() ) );
mDoc.removeChild( operandElem );
}
else
{
mErrorMessage = QObject::tr( "This use of unary operator not implemented yet" );
return QDomElement();
}
break;
case QgsSQLStatement::uoNot:
uoElem = mDoc.createElement( mFilterPrefix + ":Not" );
uoElem.appendChild( operandElem );
break;
default:
mErrorMessage = QObject::tr( "Unary operator %1 not implemented yet" ).arg( QgsSQLStatement::UNARY_OPERATOR_TEXT[node->op()] );
return QDomElement();
}
return uoElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeBinaryOperator *node )
{
QDomElement leftElem = toOgcFilter( node->opLeft() );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QgsSQLStatement::BinaryOperator op = node->op();
// before right operator is parsed: to allow NULL handling
if ( op == QgsSQLStatement::boIs || op == QgsSQLStatement::boIsNot )
{
if ( node->opRight()->nodeType() == QgsSQLStatement::ntLiteral )
{
const QgsSQLStatement::NodeLiteral *rightLit = static_cast<const QgsSQLStatement::NodeLiteral *>( node->opRight() );
if ( rightLit->value().isNull() )
{
QDomElement elem = mDoc.createElement( mFilterPrefix + ":PropertyIsNull" );
elem.appendChild( leftElem );
if ( op == QgsSQLStatement::boIsNot )
{
QDomElement notElem = mDoc.createElement( mFilterPrefix + ":Not" );
notElem.appendChild( elem );
return notElem;
}
return elem;
}
// continue with equal / not equal operator once the null case is handled
op = ( op == QgsSQLStatement::boIs ? QgsSQLStatement::boEQ : QgsSQLStatement::boNE );
}
}
QDomElement rightElem = toOgcFilter( node->opRight() );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QString opText;
if ( op == QgsSQLStatement::boOr )
opText = QStringLiteral( "Or" );
else if ( op == QgsSQLStatement::boAnd )
opText = QStringLiteral( "And" );
else if ( op == QgsSQLStatement::boEQ )
opText = QStringLiteral( "PropertyIsEqualTo" );
else if ( op == QgsSQLStatement::boNE )
opText = QStringLiteral( "PropertyIsNotEqualTo" );
else if ( op == QgsSQLStatement::boLE )
opText = QStringLiteral( "PropertyIsLessThanOrEqualTo" );
else if ( op == QgsSQLStatement::boGE )
opText = QStringLiteral( "PropertyIsGreaterThanOrEqualTo" );
else if ( op == QgsSQLStatement::boLT )
opText = QStringLiteral( "PropertyIsLessThan" );
else if ( op == QgsSQLStatement::boGT )
opText = QStringLiteral( "PropertyIsGreaterThan" );
else if ( op == QgsSQLStatement::boLike )
opText = QStringLiteral( "PropertyIsLike" );
else if ( op == QgsSQLStatement::boILike )
opText = QStringLiteral( "PropertyIsLike" );
if ( opText.isEmpty() )
{
// not implemented binary operators
mErrorMessage = QObject::tr( "Binary operator %1 not implemented yet" ).arg( QgsSQLStatement::BINARY_OPERATOR_TEXT[op] );
return QDomElement();
}
QDomElement boElem = mDoc.createElement( mFilterPrefix + ":" + opText );
if ( op == QgsSQLStatement::boLike || op == QgsSQLStatement::boILike )
{
if ( op == QgsSQLStatement::boILike )
boElem.setAttribute( QStringLiteral( "matchCase" ), QStringLiteral( "false" ) );
// setup wildCards to <ogc:PropertyIsLike>
boElem.setAttribute( QStringLiteral( "wildCard" ), QStringLiteral( "%" ) );
boElem.setAttribute( QStringLiteral( "singleChar" ), QStringLiteral( "_" ) );
if ( mFilterVersion == QgsOgcUtils::FILTER_OGC_1_0 )
boElem.setAttribute( QStringLiteral( "escape" ), QStringLiteral( "\\" ) );
else
boElem.setAttribute( QStringLiteral( "escapeChar" ), QStringLiteral( "\\" ) );
}
boElem.appendChild( leftElem );
boElem.appendChild( rightElem );
return boElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeLiteral *node )
{
QString value;
switch ( node->value().type() )
{
case QVariant::Int:
value = QString::number( node->value().toInt() );
break;
case QVariant::LongLong:
value = QString::number( node->value().toLongLong() );
break;
case QVariant::Double:
value = qgsDoubleToString( node->value().toDouble() );
break;
case QVariant::String:
value = node->value().toString();
break;
default:
mErrorMessage = QObject::tr( "Literal type not supported: %1" ).arg( node->value().type() );
return QDomElement();
}
QDomElement litElem = mDoc.createElement( mFilterPrefix + ":Literal" );
litElem.appendChild( mDoc.createTextNode( value ) );
return litElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeColumnRef *node )
{
QDomElement propElem = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
if ( node->tableName().isEmpty() || mLayerProperties.size() == 1 )
propElem.appendChild( mDoc.createTextNode( node->name() ) );
else
{
QString tableName( mMapTableAliasToNames[node->tableName()] );
if ( mMapUnprefixedTypenameToPrefixedTypename.contains( tableName ) )
tableName = mMapUnprefixedTypenameToPrefixedTypename[tableName];
propElem.appendChild( mDoc.createTextNode( tableName + "/" + node->name() ) );
}
return propElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeInOperator *node )
{
if ( node->list()->list().size() == 1 )
return toOgcFilter( node->list()->list()[0] );
QDomElement orElem = mDoc.createElement( mFilterPrefix + ":Or" );
QDomElement leftNode = toOgcFilter( node->node() );
const auto constList = node->list()->list();
for ( QgsSQLStatement::Node *n : constList )
{
QDomElement listNode = toOgcFilter( n );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
QDomElement eqElem = mDoc.createElement( mFilterPrefix + ":PropertyIsEqualTo" );
eqElem.appendChild( leftNode.cloneNode() );
eqElem.appendChild( listNode );
orElem.appendChild( eqElem );
}
if ( node->isNotIn() )
{
QDomElement notElem = mDoc.createElement( mFilterPrefix + ":Not" );
notElem.appendChild( orElem );
return notElem;
}
return orElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeBetweenOperator *node )
{
QDomElement elem = mDoc.createElement( mFilterPrefix + ":PropertyIsBetween" );
elem.appendChild( toOgcFilter( node->node() ) );
QDomElement lowerBoundary = mDoc.createElement( mFilterPrefix + ":LowerBoundary" );
lowerBoundary.appendChild( toOgcFilter( node->minVal() ) );
elem.appendChild( lowerBoundary );
QDomElement upperBoundary = mDoc.createElement( mFilterPrefix + ":UpperBoundary" );
upperBoundary.appendChild( toOgcFilter( node->maxVal() ) );
elem.appendChild( upperBoundary );
if ( node->isNotBetween() )
{
QDomElement notElem = mDoc.createElement( mFilterPrefix + ":Not" );
notElem.appendChild( elem );
return notElem;
}
return elem;
}
static QString mapBinarySpatialToOgc( const QString &name )
{
QString nameCompare( name );
if ( name.size() > 3 && name.midRef( 0, 3 ).compare( QLatin1String( "ST_" ), Qt::CaseInsensitive ) == 0 )
nameCompare = name.mid( 3 );
QStringList spatialOps;
spatialOps << QStringLiteral( "BBOX" ) << QStringLiteral( "Intersects" ) << QStringLiteral( "Contains" ) << QStringLiteral( "Crosses" ) << QStringLiteral( "Equals" )
<< QStringLiteral( "Disjoint" ) << QStringLiteral( "Overlaps" ) << QStringLiteral( "Touches" ) << QStringLiteral( "Within" );
const auto constSpatialOps = spatialOps;
for ( QString op : constSpatialOps )
{
if ( nameCompare.compare( op, Qt::CaseInsensitive ) == 0 )
return op;
}
return QString();
}
static QString mapTernarySpatialToOgc( const QString &name )
{
QString nameCompare( name );
if ( name.size() > 3 && name.midRef( 0, 3 ).compare( QLatin1String( "ST_" ), Qt::CaseInsensitive ) == 0 )
nameCompare = name.mid( 3 );
if ( nameCompare.compare( QLatin1String( "DWithin" ), Qt::CaseInsensitive ) == 0 )
return QStringLiteral( "DWithin" );
if ( nameCompare.compare( QLatin1String( "Beyond" ), Qt::CaseInsensitive ) == 0 )
return QStringLiteral( "Beyond" );
return QString();
}
QString QgsOgcUtilsSQLStatementToFilter::getGeometryColumnSRSName( const QgsSQLStatement::Node *node )
{
if ( node->nodeType() != QgsSQLStatement::ntColumnRef )
return QString();
const QgsSQLStatement::NodeColumnRef *col = static_cast<const QgsSQLStatement::NodeColumnRef *>( node );
if ( !col->tableName().isEmpty() )
{
const auto constMLayerProperties = mLayerProperties;
for ( QgsOgcUtils::LayerProperties prop : constMLayerProperties )
{
if ( prop.mName.compare( mMapTableAliasToNames[col->tableName()], Qt::CaseInsensitive ) == 0 &&
prop.mGeometryAttribute.compare( col->name(), Qt::CaseInsensitive ) == 0 )
{
return prop.mSRSName;
}
}
}
if ( !mLayerProperties.empty() &&
mLayerProperties.at( 0 ).mGeometryAttribute.compare( col->name(), Qt::CaseInsensitive ) == 0 )
{
return mLayerProperties.at( 0 ).mSRSName;
}
return QString();
}
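// Resolves the optional trailing SRID/SRS argument of ST_GeometryFromText / ST_MakeEnvelope:
// integer SRIDs become "EPSG:n" for OGC 1.0 filters and "urn:ogc:def:crs:EPSG::n" otherwise,
// and the axis inversion flag is toggled when the CRS has an inverted axis order and axis
// orientation is honoured.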
bool QgsOgcUtilsSQLStatementToFilter::processSRSName( const QgsSQLStatement::NodeFunction *mainNode,
QList<QgsSQLStatement::Node *> args,
bool lastArgIsSRSName,
QString &srsName,
bool &axisInversion )
{
srsName = mCurrentSRSName;
axisInversion = mInvertAxisOrientation;
if ( lastArgIsSRSName )
{
QgsSQLStatement::Node *lastArg = args[ args.size() - 1 ];
if ( lastArg->nodeType() != QgsSQLStatement::ntLiteral )
{
mErrorMessage = QObject::tr( "%1: Last argument must be string or integer literal" ).arg( mainNode->name() );
return false;
}
const QgsSQLStatement::NodeLiteral *lit = static_cast<const QgsSQLStatement::NodeLiteral *>( lastArg );
if ( lit->value().type() == QVariant::Int )
{
if ( mFilterVersion == QgsOgcUtils::FILTER_OGC_1_0 )
{
srsName = "EPSG:" + QString::number( lit->value().toInt() );
}
else
{
srsName = "urn:ogc:def:crs:EPSG::" + QString::number( lit->value().toInt() );
}
}
else
{
srsName = lit->value().toString();
if ( srsName.startsWith( QLatin1String( "EPSG:" ), Qt::CaseInsensitive ) )
return true;
}
}
QgsCoordinateReferenceSystem crs;
if ( !srsName.isEmpty() )
crs = QgsCoordinateReferenceSystem::fromOgcWmsCrs( srsName );
if ( crs.isValid() )
{
if ( mHonourAxisOrientation && crs.hasAxisInverted() )
{
axisInversion = !axisInversion;
}
}
return true;
}
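// SQL function nodes: ST_GeometryFromText / ST_MakeEnvelope / ST_GeomFromGML produce inline
// GML geometry, ST_<binary predicate> and ST_DWithin / ST_Beyond produce the corresponding
// OGC spatial operator, and everything else is exported as a generic <Function> element.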
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeFunction *node )
{
// ST_GeometryFromText
if ( node->name().compare( QLatin1String( "ST_GeometryFromText" ), Qt::CaseInsensitive ) == 0 )
{
QList<QgsSQLStatement::Node *> args = node->args()->list();
if ( args.size() != 1 && args.size() != 2 )
{
mErrorMessage = QObject::tr( "Function %1 should have 1 or 2 arguments" ).arg( node->name() );
return QDomElement();
}
QgsSQLStatement::Node *firstFnArg = args[0];
if ( firstFnArg->nodeType() != QgsSQLStatement::ntLiteral )
{
mErrorMessage = QObject::tr( "%1: First argument must be string literal" ).arg( node->name() );
return QDomElement();
}
QString srsName;
bool axisInversion;
if ( ! processSRSName( node, args, args.size() == 2, srsName, axisInversion ) )
{
return QDomElement();
}
QString wkt = static_cast<const QgsSQLStatement::NodeLiteral *>( firstFnArg )->value().toString();
QgsGeometry geom = QgsGeometry::fromWkt( wkt );
QDomElement geomElem = QgsOgcUtils::geometryToGML( geom, mDoc, mGMLVersion, srsName, axisInversion,
QStringLiteral( "qgis_id_geom_%1" ).arg( mGeomId ) );
mGeomId ++;
if ( geomElem.isNull() )
{
mErrorMessage = QObject::tr( "%1: invalid WKT" ).arg( node->name() );
return QDomElement();
}
mGMLUsed = true;
return geomElem;
}
// ST_MakeEnvelope
if ( node->name().compare( QLatin1String( "ST_MakeEnvelope" ), Qt::CaseInsensitive ) == 0 )
{
QList<QgsSQLStatement::Node *> args = node->args()->list();
if ( args.size() != 4 && args.size() != 5 )
{
mErrorMessage = QObject::tr( "Function %1 should have 4 or 5 arguments" ).arg( node->name() );
return QDomElement();
}
QgsRectangle rect;
for ( int i = 0; i < 4; i++ )
{
QgsSQLStatement::Node *arg = args[i];
if ( arg->nodeType() != QgsSQLStatement::ntLiteral )
{
mErrorMessage = QObject::tr( "%1: Argument %2 must be numeric literal" ).arg( node->name() ).arg( i + 1 );
return QDomElement();
}
const QgsSQLStatement::NodeLiteral *lit = static_cast<const QgsSQLStatement::NodeLiteral *>( arg );
double val = 0.0;
if ( lit->value().type() == QVariant::Int )
val = lit->value().toInt();
else if ( lit->value().type() == QVariant::LongLong )
val = lit->value().toLongLong();
else if ( lit->value().type() == QVariant::Double )
val = lit->value().toDouble();
else
{
mErrorMessage = QObject::tr( "%1 Argument %2 must be numeric literal" ).arg( node->name() ).arg( i + 1 );
return QDomElement();
}
if ( i == 0 )
rect.setXMinimum( val );
else if ( i == 1 )
rect.setYMinimum( val );
else if ( i == 2 )
rect.setXMaximum( val );
else
rect.setYMaximum( val );
}
QString srsName;
bool axisInversion;
if ( ! processSRSName( node, args, args.size() == 5, srsName, axisInversion ) )
{
return QDomElement();
}
mGMLUsed = true;
return ( mGMLVersion == QgsOgcUtils::GML_2_1_2 ) ?
QgsOgcUtils::rectangleToGMLBox( &rect, mDoc, srsName, axisInversion, 15 ) :
QgsOgcUtils::rectangleToGMLEnvelope( &rect, mDoc, srsName, axisInversion, 15 );
}
// ST_GeomFromGML
if ( node->name().compare( QLatin1String( "ST_GeomFromGML" ), Qt::CaseInsensitive ) == 0 )
{
QList<QgsSQLStatement::Node *> args = node->args()->list();
if ( args.size() != 1 )
{
mErrorMessage = QObject::tr( "Function %1 should have 1 argument" ).arg( node->name() );
return QDomElement();
}
QgsSQLStatement::Node *firstFnArg = args[0];
if ( firstFnArg->nodeType() != QgsSQLStatement::ntLiteral )
{
mErrorMessage = QObject::tr( "%1: Argument must be string literal" ).arg( node->name() );
return QDomElement();
}
QDomDocument geomDoc;
QString gml = static_cast<const QgsSQLStatement::NodeLiteral *>( firstFnArg )->value().toString();
if ( !geomDoc.setContent( gml, true ) )
{
mErrorMessage = QObject::tr( "ST_GeomFromGML: unable to parse XML" );
return QDomElement();
}
QDomNode geomNode = mDoc.importNode( geomDoc.documentElement(), true );
mGMLUsed = true;
return geomNode.toElement();
}
// Binary geometry operators
QString ogcName( mapBinarySpatialToOgc( node->name() ) );
if ( !ogcName.isEmpty() )
{
QList<QgsSQLStatement::Node *> args = node->args()->list();
if ( args.size() != 2 )
{
mErrorMessage = QObject::tr( "Function %1 should have 2 arguments" ).arg( node->name() );
return QDomElement();
}
for ( int i = 0; i < 2; i ++ )
{
if ( args[i]->nodeType() == QgsSQLStatement::ntFunction &&
( static_cast<const QgsSQLStatement::NodeFunction *>( args[i] )->name().compare( QLatin1String( "ST_GeometryFromText" ), Qt::CaseInsensitive ) == 0 ||
static_cast<const QgsSQLStatement::NodeFunction *>( args[i] )->name().compare( QLatin1String( "ST_MakeEnvelope" ), Qt::CaseInsensitive ) == 0 ) )
{
mCurrentSRSName = getGeometryColumnSRSName( args[1 - i] );
break;
}
}
//if( ogcName == "Intersects" && mFilterVersion == QgsOgcUtils::FILTER_OGC_1_0 )
// ogcName = "Intersect";
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":" + ogcName );
const auto constArgs = args;
for ( QgsSQLStatement::Node *n : constArgs )
{
QDomElement childElem = toOgcFilter( n );
if ( !mErrorMessage.isEmpty() )
{
mCurrentSRSName.clear();
return QDomElement();
}
funcElem.appendChild( childElem );
}
mCurrentSRSName.clear();
return funcElem;
}
ogcName = mapTernarySpatialToOgc( node->name() );
if ( !ogcName.isEmpty() )
{
QList<QgsSQLStatement::Node *> args = node->args()->list();
if ( args.size() != 3 )
{
mErrorMessage = QObject::tr( "Function %1 should have 3 arguments" ).arg( node->name() );
return QDomElement();
}
for ( int i = 0; i < 2; i ++ )
{
if ( args[i]->nodeType() == QgsSQLStatement::ntFunction &&
( static_cast<const QgsSQLStatement::NodeFunction *>( args[i] )->name().compare( QLatin1String( "ST_GeometryFromText" ), Qt::CaseInsensitive ) == 0 ||
static_cast<const QgsSQLStatement::NodeFunction *>( args[i] )->name().compare( QLatin1String( "ST_MakeEnvelope" ), Qt::CaseInsensitive ) == 0 ) )
{
mCurrentSRSName = getGeometryColumnSRSName( args[1 - i] );
break;
}
}
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":" + node->name().mid( 3 ) );
for ( int i = 0; i < 2; i++ )
{
QDomElement childElem = toOgcFilter( args[i] );
if ( !mErrorMessage.isEmpty() )
{
mCurrentSRSName.clear();
return QDomElement();
}
funcElem.appendChild( childElem );
}
mCurrentSRSName.clear();
QgsSQLStatement::Node *distanceNode = args[2];
if ( distanceNode->nodeType() != QgsSQLStatement::ntLiteral )
{
mErrorMessage = QObject::tr( "Function %1 3rd argument should be a numeric value or a string made of a numeric value followed by a string" ).arg( node->name() );
return QDomElement();
}
const QgsSQLStatement::NodeLiteral *lit = static_cast<const QgsSQLStatement::NodeLiteral *>( distanceNode );
if ( lit->value().isNull() )
{
mErrorMessage = QObject::tr( "Function %1 3rd argument should be a numeric value or a string made of a numeric value followed by a string" ).arg( node->name() );
return QDomElement();
}
QString distance;
QString unit( QStringLiteral( "m" ) );
switch ( lit->value().type() )
{
case QVariant::Int:
distance = QString::number( lit->value().toInt() );
break;
case QVariant::LongLong:
distance = QString::number( lit->value().toLongLong() );
break;
case QVariant::Double:
distance = qgsDoubleToString( lit->value().toDouble() );
break;
case QVariant::String:
{
distance = lit->value().toString();
for ( int i = 0; i < distance.size(); i++ )
{
if ( !( ( distance[i] >= '0' && distance[i] <= '9' ) || distance[i] == '-' || distance[i] == '.' || distance[i] == 'e' || distance[i] == 'E' ) )
{
unit = distance.mid( i ).trimmed();
distance = distance.mid( 0, i );
break;
}
}
break;
}
default:
mErrorMessage = QObject::tr( "Literal type not supported: %1" ).arg( lit->value().type() );
return QDomElement();
}
QDomElement distanceElem = mDoc.createElement( mFilterPrefix + ":Distance" );
if ( mFilterVersion == QgsOgcUtils::FILTER_FES_2_0 )
distanceElem.setAttribute( QStringLiteral( "uom" ), unit );
else
distanceElem.setAttribute( QStringLiteral( "unit" ), unit );
distanceElem.appendChild( mDoc.createTextNode( distance ) );
funcElem.appendChild( distanceElem );
return funcElem;
}
// Other function
QDomElement funcElem = mDoc.createElement( mFilterPrefix + ":Function" );
funcElem.setAttribute( QStringLiteral( "name" ), node->name() );
const auto constList = node->args()->list();
for ( QgsSQLStatement::Node *n : constList )
{
QDomElement childElem = toOgcFilter( n );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
funcElem.appendChild( childElem );
}
return funcElem;
}
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeJoin *node,
const QString &leftTable )
{
QgsSQLStatement::Node *onExpr = node->onExpr();
if ( onExpr )
{
return toOgcFilter( onExpr );
}
QList<QDomElement> listElem;
const auto constUsingColumns = node->usingColumns();
for ( const QString &columnName : constUsingColumns )
{
QDomElement eqElem = mDoc.createElement( mFilterPrefix + ":PropertyIsEqualTo" );
QDomElement propElem1 = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
propElem1.appendChild( mDoc.createTextNode( leftTable + "/" + columnName ) );
eqElem.appendChild( propElem1 );
QDomElement propElem2 = mDoc.createElement( mFilterPrefix + ":" + mPropertyName );
propElem2.appendChild( mDoc.createTextNode( node->tableDef()->name() + "/" + columnName ) );
eqElem.appendChild( propElem2 );
listElem.append( eqElem );
}
if ( listElem.size() == 1 )
{
return listElem[0];
}
else if ( listElem.size() > 1 )
{
QDomElement andElem = mDoc.createElement( mFilterPrefix + ":And" );
const auto constListElem = listElem;
for ( const QDomElement &elem : constListElem )
{
andElem.appendChild( elem );
}
return andElem;
}
return QDomElement();
}
void QgsOgcUtilsSQLStatementToFilter::visit( const QgsSQLStatement::NodeTableDef *node )
{
if ( node->alias().isEmpty() )
{
mMapTableAliasToNames[ node->name()] = node->name();
}
else
{
mMapTableAliasToNames[ node->alias()] = node->name();
}
}
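// SELECT translation: registers table aliases, converts JOIN ... ON/USING conditions and the
// WHERE clause, and ANDs them together; joins are rejected for pre-FES-2.0 filter versions.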
QDomElement QgsOgcUtilsSQLStatementToFilter::toOgcFilter( const QgsSQLStatement::NodeSelect *node )
{
QList<QDomElement> listElem;
if ( mFilterVersion != QgsOgcUtils::FILTER_FES_2_0 &&
( node->tables().size() != 1 || !node->joins().empty() ) )
{
mErrorMessage = QObject::tr( "Joins are only supported with WFS 2.0" );
return QDomElement();
}
// Register all table name aliases
const auto constTables = node->tables();
for ( QgsSQLStatement::NodeTableDef *table : constTables )
{
visit( table );
}
const auto constJoins = node->joins();
for ( QgsSQLStatement::NodeJoin *join : constJoins )
{
visit( join->tableDef() );
}
// Process JOIN conditions
QList< QgsSQLStatement::NodeTableDef *> nodeTables = node->tables();
QString leftTable = nodeTables.at( nodeTables.length() - 1 )->name();
for ( QgsSQLStatement::NodeJoin *join : constJoins )
{
QDomElement joinElem = toOgcFilter( join, leftTable );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
listElem.append( joinElem );
leftTable = join->tableDef()->name();
}
// Process WHERE conditions
if ( node->where() )
{
QDomElement whereElem = toOgcFilter( node->where() );
if ( !mErrorMessage.isEmpty() )
return QDomElement();
listElem.append( whereElem );
}
// Concatenate all conditions
if ( listElem.size() == 1 )
{
return listElem[0];
}
else if ( listElem.size() > 1 )
{
QDomElement andElem = mDoc.createElement( mFilterPrefix + ":And" );
const auto constListElem = listElem;
for ( const QDomElement &elem : constListElem )
{
andElem.appendChild( elem );
}
return andElem;
}
return QDomElement();
}
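// OGC Filter -> QgsExpression import. Element and attribute names depend on the filter
// version: OGC 1.0/1.1 use ogc:PropertyName, FES 2.0 uses fes:ValueReference.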
QgsOgcUtilsExpressionFromFilter::QgsOgcUtilsExpressionFromFilter( const QgsOgcUtils::FilterVersion version, const QgsVectorLayer *layer )
: mLayer( layer )
{
mPropertyName = QStringLiteral( "PropertyName" );
mPrefix = QStringLiteral( "ogc" );
if ( version == QgsOgcUtils::FILTER_FES_2_0 )
{
mPropertyName = QStringLiteral( "ValueReference" );
mPrefix = QStringLiteral( "fes" );
}
}
QgsExpressionNode *QgsOgcUtilsExpressionFromFilter::nodeFromOgcFilter( const QDomElement &element )
{
if ( element.isNull() )
return nullptr;
// check for binary operators
if ( isBinaryOperator( element.tagName() ) )
{
return nodeBinaryOperatorFromOgcFilter( element );
}
// check for spatial operators
if ( isSpatialOperator( element.tagName() ) )
{
return nodeSpatialOperatorFromOgcFilter( element );
}
// check for other OGC operators, convert them to expressions
if ( element.tagName() == QLatin1String( "Not" ) )
{
return nodeNotFromOgcFilter( element );
}
else if ( element.tagName() == QLatin1String( "PropertyIsNull" ) )
{
return nodePropertyIsNullFromOgcFilter( element );
}
else if ( element.tagName() == QLatin1String( "Literal" ) )
{
return nodeLiteralFromOgcFilter( element );
}
else if ( element.tagName() == QLatin1String( "Function" ) )
{
return nodeFunctionFromOgcFilter( element );
}
else if ( element.tagName() == mPropertyName )
{
return nodeColumnRefFromOgcFilter( element );
}
else if ( element.tagName() == QLatin1String( "PropertyIsBetween" ) )
{
return nodeIsBetweenFromOgcFilter( element );
}
mErrorMessage += QObject::tr( "unable to convert '%1' element to a valid expression: it is not supported yet or it has invalid arguments" ).arg( element.tagName() );
return nullptr;
}
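// Comparison operators: chains of operands are folded left to right into nested binary
// operator nodes. For PropertyIsLike the declared wildCard/singleChar/escape characters are
// normalised back to the QGIS conventions ('%', '_', '\').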
QgsExpressionNodeBinaryOperator *QgsOgcUtilsExpressionFromFilter::nodeBinaryOperatorFromOgcFilter( const QDomElement &element )
{
if ( element.isNull() )
return nullptr;
int op = binaryOperatorFromTagName( element.tagName() );
if ( op < 0 )
{
mErrorMessage = QObject::tr( "'%1' binary operator not supported." ).arg( element.tagName() );
return nullptr;
}
if ( op == QgsExpressionNodeBinaryOperator::boLike && element.hasAttribute( QStringLiteral( "matchCase" ) ) && element.attribute( QStringLiteral( "matchCase" ) ) == QLatin1String( "false" ) )
{
op = QgsExpressionNodeBinaryOperator::boILike;
}
QDomElement operandElem = element.firstChildElement();
std::unique_ptr<QgsExpressionNode> expr( nodeFromOgcFilter( operandElem ) );
if ( !expr )
{
mErrorMessage = QObject::tr( "invalid left operand for '%1' binary operator" ).arg( element.tagName() );
return nullptr;
}
std::unique_ptr<QgsExpressionNode> leftOp( expr->clone() );
for ( operandElem = operandElem.nextSiblingElement(); !operandElem.isNull(); operandElem = operandElem.nextSiblingElement() )
{
std::unique_ptr<QgsExpressionNode> opRight( nodeFromOgcFilter( operandElem ) );
if ( !opRight )
{
mErrorMessage = QObject::tr( "invalid right operand for '%1' binary operator" ).arg( element.tagName() );
return nullptr;
}
if ( op == QgsExpressionNodeBinaryOperator::boLike || op == QgsExpressionNodeBinaryOperator::boILike )
{
QString wildCard;
if ( element.hasAttribute( QStringLiteral( "wildCard" ) ) )
{
wildCard = element.attribute( QStringLiteral( "wildCard" ) );
}
QString singleChar;
if ( element.hasAttribute( QStringLiteral( "singleChar" ) ) )
{
singleChar = element.attribute( QStringLiteral( "singleChar" ) );
}
QString escape = QStringLiteral( "\\" );
if ( element.hasAttribute( QStringLiteral( "escape" ) ) )
{
escape = element.attribute( QStringLiteral( "escape" ) );
}
if ( element.hasAttribute( QStringLiteral( "escapeChar" ) ) )
{
escape = element.attribute( QStringLiteral( "escapeChar" ) );
}
// replace
QString oprValue = static_cast<const QgsExpressionNodeLiteral *>( opRight.get() )->value().toString();
if ( !wildCard.isEmpty() && wildCard != QLatin1String( "%" ) )
{
oprValue.replace( '%', QLatin1String( "\\%" ) );
if ( oprValue.startsWith( wildCard ) )
{
oprValue.replace( 0, 1, QStringLiteral( "%" ) );
}
QRegExp rx( "[^" + QRegExp::escape( escape ) + "](" + QRegExp::escape( wildCard ) + ")" );
int pos = 0;
while ( ( pos = rx.indexIn( oprValue, pos ) ) != -1 )
{
oprValue.replace( pos + 1, 1, QStringLiteral( "%" ) );
pos += 1;
}
oprValue.replace( escape + wildCard, wildCard );
}
if ( !singleChar.isEmpty() && singleChar != QLatin1String( "_" ) )
{
oprValue.replace( '_', QLatin1String( "\\_" ) );
if ( oprValue.startsWith( singleChar ) )
{
oprValue.replace( 0, 1, QStringLiteral( "_" ) );
}
QRegExp rx( "[^" + QRegExp::escape( escape ) + "](" + QRegExp::escape( singleChar ) + ")" );
int pos = 0;
while ( ( pos = rx.indexIn( oprValue, pos ) ) != -1 )
{
oprValue.replace( pos + 1, 1, QStringLiteral( "_" ) );
pos += 1;
}
oprValue.replace( escape + singleChar, singleChar );
}
if ( !escape.isEmpty() && escape != QLatin1String( "\\" ) )
{
oprValue.replace( escape + escape, escape );
}
opRight.reset( new QgsExpressionNodeLiteral( oprValue ) );
}
expr.reset( new QgsExpressionNodeBinaryOperator( static_cast< QgsExpressionNodeBinaryOperator::BinaryOperator >( op ), expr.release(), opRight.release() ) );
}
if ( expr == leftOp )
{
mErrorMessage = QObject::tr( "only one operand for '%1' binary operator" ).arg( element.tagName() );
return nullptr;
}
return dynamic_cast< QgsExpressionNodeBinaryOperator * >( expr.release() );
}
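// Spatial operators are turned into <operator>($geometry, geomFromGML('<gml:…>')) function
// calls, relying on the OGC tag name matching the QGIS function name.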
QgsExpressionNodeFunction *QgsOgcUtilsExpressionFromFilter::nodeSpatialOperatorFromOgcFilter( const QDomElement &element )
{
// we are exploiting the fact that our function names are the same as the XML tag names
const int opIdx = QgsExpression::functionIndex( element.tagName().toLower() );
std::unique_ptr<QgsExpressionNode::NodeList> gml2Args( new QgsExpressionNode::NodeList() );
QDomElement childElem = element.firstChildElement();
QString gml2Str;
while ( !childElem.isNull() && gml2Str.isEmpty() )
{
if ( childElem.tagName() != mPropertyName )
{
QTextStream gml2Stream( &gml2Str );
childElem.save( gml2Stream, 0 );
}
childElem = childElem.nextSiblingElement();
}
if ( !gml2Str.isEmpty() )
{
gml2Args->append( new QgsExpressionNodeLiteral( QVariant( gml2Str.remove( '\n' ) ) ) );
}
else
{
mErrorMessage = QObject::tr( "No OGC Geometry found" );
return nullptr;
}
std::unique_ptr<QgsExpressionNode::NodeList> opArgs( new QgsExpressionNode::NodeList() );
opArgs->append( new QgsExpressionNodeFunction( QgsExpression::functionIndex( QStringLiteral( "$geometry" ) ), new QgsExpressionNode::NodeList() ) );
opArgs->append( new QgsExpressionNodeFunction( QgsExpression::functionIndex( QStringLiteral( "geomFromGML" ) ), gml2Args.release() ) );
return new QgsExpressionNodeFunction( opIdx, opArgs.release() );
}
QgsExpressionNodeColumnRef *QgsOgcUtilsExpressionFromFilter::nodeColumnRefFromOgcFilter( const QDomElement &element )
{
if ( element.isNull() || element.tagName() != mPropertyName )
{
mErrorMessage = QObject::tr( "%1:PropertyName expected, got %2" ).arg( mPrefix, element.tagName() );
return nullptr;
}
return new QgsExpressionNodeColumnRef( element.firstChild().nodeValue() );
}
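// Literal content may consist of several child nodes (text, CDATA, nested elements); they are
// converted individually and concatenated. When a layer is known, plain text is coerced to the
// type of the field referenced by the sibling PropertyName/ValueReference element, otherwise a
// numeric conversion is attempted before falling back to a string literal.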
QgsExpressionNode *QgsOgcUtilsExpressionFromFilter::nodeLiteralFromOgcFilter( const QDomElement &element )
{
if ( element.isNull() || element.tagName() != QLatin1String( "Literal" ) )
{
mErrorMessage = QObject::tr( "%1:Literal expected, got %2" ).arg( mPrefix, element.tagName() );
return nullptr;
}
std::unique_ptr<QgsExpressionNode> root;
// the literal content can have more children (e.g. CDATA section, text, ...)
QDomNode childNode = element.firstChild();
while ( !childNode.isNull() )
{
std::unique_ptr<QgsExpressionNode> operand;
if ( childNode.nodeType() == QDomNode::ElementNode )
{
// found a element node (e.g. PropertyName), convert it
const QDomElement operandElem = childNode.toElement();
operand.reset( nodeFromOgcFilter( operandElem ) );
if ( !operand )
{
mErrorMessage = QObject::tr( "'%1' is an invalid or not supported content for %2:Literal" ).arg( operandElem.tagName(), mPrefix );
return nullptr;
}
}
else
{
// probably a text/CDATA node
QVariant value = childNode.nodeValue();
bool converted = false;
// try to convert the node content to corresponding field type if possible
if ( mLayer )
{
QDomElement propertyNameElement = element.previousSiblingElement( mPropertyName );
if ( propertyNameElement.isNull() || propertyNameElement.tagName() != mPropertyName )
{
propertyNameElement = element.nextSiblingElement( mPropertyName );
}
if ( !propertyNameElement.isNull() || propertyNameElement.tagName() == mPropertyName )
{
const int fieldIndex = mLayer->fields().indexOf( propertyNameElement.firstChild().nodeValue() );
if ( fieldIndex != -1 )
{
QgsField field = mLayer->fields().field( propertyNameElement.firstChild().nodeValue() );
field.convertCompatible( value );
converted = true;
}
}
}
if ( !converted )
{
// try to convert the node content to number if possible,
// otherwise let's use it as string
bool ok;
const double d = value.toDouble( &ok );
if ( ok )
value = d;
}
operand.reset( new QgsExpressionNodeLiteral( value ) );
if ( !operand )
continue;
}
// use the concat operator to merge the ogc:Literal children
if ( !root )
{
root = std::move( operand );
}
else
{
root.reset( new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boConcat, root.release(), operand.release() ) );
}
childNode = childNode.nextSibling();
}
if ( root )
return root.release();
return nullptr;
}
QgsExpressionNodeUnaryOperator *QgsOgcUtilsExpressionFromFilter::nodeNotFromOgcFilter( const QDomElement &element )
{
if ( element.tagName() != QLatin1String( "Not" ) )
return nullptr;
const QDomElement operandElem = element.firstChildElement();
std::unique_ptr<QgsExpressionNode> operand( nodeFromOgcFilter( operandElem ) );
if ( !operand )
{
mErrorMessage = QObject::tr( "invalid operand for '%1' unary operator" ).arg( element.tagName() );
return nullptr;
}
return new QgsExpressionNodeUnaryOperator( QgsExpressionNodeUnaryOperator::uoNot, operand.release() );
}
QgsExpressionNodeBinaryOperator *QgsOgcUtilsExpressionFromFilter::nodePropertyIsNullFromOgcFilter( const QDomElement &element )
{
// convert ogc:PropertyIsNull to IS operator with NULL right operand
if ( element.tagName() != QLatin1String( "PropertyIsNull" ) )
{
return nullptr;
}
const QDomElement operandElem = element.firstChildElement();
std::unique_ptr<QgsExpressionNode> opLeft( nodeFromOgcFilter( operandElem ) );
if ( !opLeft )
return nullptr;
std::unique_ptr<QgsExpressionNode> opRight( new QgsExpressionNodeLiteral( QVariant() ) );
return new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boIs, opLeft.release(), opRight.release() );
}
QgsExpressionNodeFunction *QgsOgcUtilsExpressionFromFilter::nodeFunctionFromOgcFilter( const QDomElement &element )
{
if ( element.isNull() || element.tagName() != QLatin1String( "Function" ) )
{
mErrorMessage = QObject::tr( "%1:Function expected, got %2" ).arg( mPrefix, element.tagName() );
return nullptr;
}
for ( int i = 0; i < QgsExpression::Functions().size(); i++ )
{
const QgsExpressionFunction *funcDef = QgsExpression::Functions()[i];
if ( element.attribute( QStringLiteral( "name" ) ) != funcDef->name() )
continue;
std::unique_ptr<QgsExpressionNode::NodeList> args( new QgsExpressionNode::NodeList() );
QDomElement operandElem = element.firstChildElement();
while ( !operandElem.isNull() )
{
std::unique_ptr<QgsExpressionNode> op( nodeFromOgcFilter( operandElem ) );
if ( !op )
{
return nullptr;
}
args->append( op.release() );
operandElem = operandElem.nextSiblingElement();
}
return new QgsExpressionNodeFunction( i, args.release() );
}
return nullptr;
}
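// PropertyIsBetween has no direct expression equivalent, so it is rewritten as
// "operand >= LowerBoundary AND operand <= UpperBoundary".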
QgsExpressionNode *QgsOgcUtilsExpressionFromFilter::nodeIsBetweenFromOgcFilter( const QDomElement &element )
{
// <ogc:PropertyIsBetween> encode a Range check
std::unique_ptr<QgsExpressionNode> operand;
std::unique_ptr<QgsExpressionNode> lowerBound;
std::unique_ptr<QgsExpressionNode> upperBound;
QDomElement operandElem = element.firstChildElement();
while ( !operandElem.isNull() )
{
if ( operandElem.tagName() == QLatin1String( "LowerBoundary" ) )
{
QDomElement lowerBoundElem = operandElem.firstChildElement();
lowerBound.reset( nodeFromOgcFilter( lowerBoundElem ) );
}
else if ( operandElem.tagName() == QLatin1String( "UpperBoundary" ) )
{
QDomElement upperBoundElem = operandElem.firstChildElement();
upperBound.reset( nodeFromOgcFilter( upperBoundElem ) );
}
else
{
// <ogc:expression>
operand.reset( nodeFromOgcFilter( operandElem ) );
}
if ( operand && lowerBound && upperBound )
break;
operandElem = operandElem.nextSiblingElement();
}
if ( !operand || !lowerBound || !upperBound )
{
mErrorMessage = QObject::tr( "missing some required sub-elements in %1:PropertyIsBetween" ).arg( mPrefix );
return nullptr;
}
std::unique_ptr<QgsExpressionNode> leOperator( new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boLE, operand->clone(), upperBound.release() ) );
std::unique_ptr<QgsExpressionNode> geOperator( new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boGE, operand.release(), lowerBound.release() ) );
return new QgsExpressionNodeBinaryOperator( QgsExpressionNodeBinaryOperator::boAnd, geOperator.release(), leOperator.release() );
}
QString QgsOgcUtilsExpressionFromFilter::errorMessage() const
{
return mErrorMessage;
}<|fim▁end|> | break; |
<|file_name|>stats.py<|end_file_name|>
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 17:14:41 2016
@author: sdemyanov
"""
import numpy as np
from sklearn import metrics
def get_prob_acc(probs, labels):
return np.mean(np.argmax(probs, axis=1) == labels)
def get_auc_score(scores, labels):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
return metrics.auc(fpr, tpr)
def get_f1_score(confmat):
assert confmat.shape[0] == 2 and confmat.shape[1] == 2
precision = float(confmat[0, 0]) / np.sum(confmat[:, 0])
recall = float(confmat[0, 0]) / np.sum(confmat[0, :])
print 'precision: %f' % precision
print 'recall: %f' % recall
return 2 * precision * recall / (precision + recall)
def get_accuracy(confmat):
correct = np.sum(np.diagonal(confmat))
overall = np.sum(confmat)
return correct.astype(float) / overall
def get_sensitivities(confmat):
correct = np.diagonal(confmat)
overall = np.sum(confmat, 1)
return np.divide(np.array(correct, dtype=np.float), overall)
def get_pred_confmat(classes, preds, labels):
classnum = len(classes)
mat = np.zeros((classnum, classnum), dtype=int)
for pind in range(preds.shape[0]):
labind = np.where(classes == labels[pind])
predind = np.where(classes == preds[pind])
mat[labind[0], predind[0]] += 1
# mat = np.transpose(mat)
return mat
def get_prob_confmat(probs, labels):
  classnum = probs.shape[1]
  mat = np.zeros((classnum, classnum), dtype=int)
  for pind in range(probs.shape[0]):
mat[int(labels[pind]), np.argmax(probs[pind, :])] += 1
#mat = np.transpose(mat)
return mat
def get_block_confmat(confmat, blocks):
assert(confmat.shape[0] == confmat.shape[1])
classnum = confmat.shape[0]
#assert(np.sum(blocks) == classnum)
blocknum = len(blocks)
blockconf = np.zeros((blocknum, blocknum))
for bi in range(blocknum):
for bj in range(blocknum):
blockconf[bi, bj] = 0
for i in blocks[bi]:
for j in blocks[bj]:
blockconf[bi, bj] += confmat[i, j]
assert np.sum(blockconf) == np.sum(confmat), 'Blocks should represent a splitting of confmat'
return blockconf
def get_block_probs_labels(prob, labels, blocks):
# IMPORTANT: blocks must not intersect, otherwise the result is not unique
blocknum = len(blocks)
assert prob.shape[0] == labels.shape[0]
newprob = np.zeros((prob.shape[0], blocknum))
for i in range(blocknum):
newprob[:, i] = np.sum(prob[:, blocks[i]], 1)
#normalize to have sum = 1
mult_coefs = np.sum(newprob, 1, keepdims=True)
newprob /= np.tile(mult_coefs, (1, blocknum))
newlab = np.zeros(prob.shape[0])
missing = []
for i in range(prob.shape[0]):
is_missing = True
for j in range(len(blocks)):
if (labels[i] in blocks[j]):
newlab[i] = j
is_missing = False
break
if (is_missing):
missing.append(i)
newprob = np.delete(newprob, missing, axis=0)
newlab = np.delete(newlab, missing, axis=0)
return newprob, newlab
def get_spec_for_sens(scores, labels, sens):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = np.size(tpr) - 1
while (tpr[curind-1] >= sens):
curind -= 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_sens_for_spec(scores, labels, spec):
fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=0)
curind = 0
while (1 - fpr[curind+1] >= spec):
curind += 1
return tpr[curind], 1 - fpr[curind], thresholds[curind]
def get_average_precisions(probs, labels):
print 'probshape:', np.shape(probs)
classnum = np.size(probs, 1)
labels_arr = np.zeros_like(probs)
for i in xrange(classnum):
labels_arr[labels == i, i] = 1
print 'macro:', metrics.average_precision_score(labels_arr, probs, average='macro')
print 'weighted:', metrics.average_precision_score(labels_arr, probs, average='weighted')
skap = metrics.average_precision_score(labels_arr, probs, average=None)
return {i: round(skap[i] * 1000) / 10 for i in xrange(classnum)}<|fim▁end|> | def get_prob_confmat(probs, labels):
classnum = probs.shape[1]
mat = np.zeros((classnum, classnum), dtype=int) |
<|file_name|>lineCounterTest.js<|end_file_name|><|fim▁begin|>/**
* Created by ozgur on 24.07.2017.
*/
var assert = require('assert');
var fs = require("fs");
var path = require("path");
var LineCounter = require("../lib/LineCounter");
var ExtensionsFactory = require("../lib/Extensions");
var Rules = require("../lib/Rules");
describe("LineCounter", function(){
before(function(){
var dir = __dirname;
fs.mkdirSync(path.join(dir, "dir"));
fs.mkdirSync(path.join(dir, "dir", "dir2"));
fs.mkdirSync(path.join(dir, "dir", "dir3"));
fs.writeFileSync(path.join(dir, "dir", "file1.java"), "line1\nline2");
fs.writeFileSync(path.join(dir, "dir", "file1.js"), "line1\nline2\nline3");
fs.writeFileSync(path.join(dir, "dir", "dir2", "file3.php"), "line1\nline2");
fs.writeFileSync(path.join(dir, "dir", "dir2", "file4.swift"), "line1\nline2");
fs.writeFileSync(path.join(dir, "dir", "dir3", "file5.java"), "line1\nline2");
fs.writeFileSync(path.join(dir, "dir", "dir3", "file6.js"), "line1\nline2");
});
describe("#resolveTargetFiles()", function(){
it("should return allowed extensions", function(){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.setExtensions(ExtensionsFactory.from("js"));
var result = lc.resolveTargetFiles();
var expected = [ path.join(__dirname, "dir", "dir3", "file6.js"), path.join(__dirname, "dir", "file1.js") ];
for( var i = 0; i < expected.length; i++ ){
assert.equal(expected[i], result[i].getPath());
}
});
<|fim▁hole|> it("should return all except disallowed ones", function(){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.setExtensions(ExtensionsFactory.except("java, swift, php"));
var result = lc.resolveTargetFiles();
var expected = [ path.join(__dirname, "dir", "dir3", "file6.js"), path.join(__dirname, "dir", "file1.js") ];
for( var i = 0; i < expected.length; i++ ){
assert.equal(expected[i], result[i].getPath());
}
});
it("should return all", function(){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
var result = lc.resolveTargetFiles();
var expected = [ path.join(__dirname, "dir", "dir2", "file3.php"), path.join(__dirname, "dir", "dir2", "file4.swift"),
path.join(__dirname, "dir", "dir3", "file5.java"), path.join(__dirname, "dir", "dir3", "file6.js"),
path.join(__dirname, "dir", "file1.java"), path.join(__dirname, "dir", "file1.js")];
for( var i = 0; i < expected.length; i++ ){
assert.equal(expected[i], result[i].getPath());
}
});
});
describe("#getLines()", function(){
it("should count all files correctly", function(done){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.getLines(function(result){
assert.equal(6, result.files);
assert.equal(13, result.lines);
done();
});
});
it("should count only js files", function(done){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.setExtensions(ExtensionsFactory.from("js"));
lc.getLines(function(result){
assert.equal(2, result.files);
assert.equal(5, result.lines);
done();
});
});
});
describe("#addRule()", function(){
it("should return only the files starts with file1", function(){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.addRule(Rules.filePrefix, "file1");
var result = lc.resolveTargetFiles();
var expected = [ path.join(__dirname, "dir", "file1.java"), path.join(__dirname, "dir", "file1.js") ];
for( var i = 0; i < expected.length; i++ ){
assert.equal(expected[i], result[i].getPath());
}
});
it("should ignore dir2 and dir3 directories", function(){
var lc = new LineCounter();
lc.setPath(path.join(__dirname, "dir"));
lc.addRule(Rules.ignoreDir, "dir2");
lc.addRule(Rules.ignoreDir, "dir3");
var result = lc.resolveTargetFiles();
var expected = [ path.join(__dirname, "dir", "file1.java"), path.join(__dirname, "dir", "file1.js") ];
for( var i = 0; i < expected.length; i++ ){
assert.equal(expected[i], result[i].getPath());
}
});
});
after(function(){
var dir = __dirname;
fs.unlinkSync(path.join(dir, "dir", "dir3", "file6.js"));
fs.unlinkSync(path.join(dir, "dir", "dir3", "file5.java"));
fs.unlinkSync(path.join(dir, "dir", "dir2", "file4.swift"));
fs.unlinkSync(path.join(dir, "dir", "dir2", "file3.php"));
fs.unlinkSync(path.join(dir, "dir", "file1.js"));
fs.unlinkSync(path.join(dir, "dir", "file1.java"));
fs.rmdirSync(path.join(dir, "dir", "dir2"));
fs.rmdirSync(path.join(dir, "dir", "dir3"));
fs.rmdirSync(path.join(dir, "dir"));
});
});<|fim▁end|> | |
<|file_name|>tocfilter.py<|end_file_name|><|fim▁begin|>import os
import finder
import re<|fim▁hole|>def makefilter(name, xtrapath=None):
typ, nm, fullname = finder.identify(name, xtrapath)
if typ in (finder.SCRIPT, finder.GSCRIPT, finder.MODULE):
return ModFilter([os.path.splitext(nm)[0]])
if typ == finder.PACKAGE:
return PkgFilter([fullname])
if typ == finder.DIRECTORY:
return DirFilter([fullname])
if typ in (finder.BINARY, finder.PBINARY):
return FileFilter([nm])
return FileFilter([fullname])
class _Filter:
def __repr__(self):
return '<'+self.__class__.__name__+' '+repr(self.elements)+'>'
class _NameFilter(_Filter):
""" A filter mixin that matches (exactly) on name """
def matches(self, res):
return self.elements.get(res.name, 0)
class _PathFilter(_Filter):
""" A filter mixin that matches if the resource is below any of the paths"""
def matches(self, res):
p = os.path.normcase(os.path.abspath(res.path))
while len(p) > 3:
p = os.path.dirname(p)
if self.elements.get(p, 0):
return 1
return 0
class _ExtFilter(_Filter):
""" A filter mixin that matches based on file extensions (either way) """
include = 0
def matches(self, res):
fnd = self.elements.get(os.path.splitext(res.path)[1], 0)
if self.include:
return not fnd
return fnd
class _TypeFilter(_Filter):
""" A filter mixin that matches on resource type (either way) """
include = 0
def matches(self, res):
fnd = self.elements.get(res.typ, 0)
if self.include:
return not fnd
return fnd
class _PatternFilter(_Filter):
""" A filter that matches if re.search succeeds on the resource path """
def matches(self, res):
for regex in self.elements:
if regex.search(res.path):
return 1
return 0
class ExtFilter(_ExtFilter):
""" A file extension filter.
ExtFilter(extlist, include=0)
where extlist is a list of file extensions """
def __init__(self, extlist, include=0):
self.elements = {}
for ext in extlist:
if ext[0:1] != '.':
ext = '.'+ext
self.elements[ext] = 1
self.include = include
class TypeFilter(_TypeFilter):
""" A filter for resource types.
TypeFilter(typlist, include=0)
where typlist is a subset of ['a','b','d','m','p','s','x','z'] """
def __init__(self, typlist, include=0):
self.elements = {}
for typ in typlist:
self.elements[typ] = 1
self.include = include
class FileFilter(_NameFilter):
""" A filter for data files """
def __init__(self, filelist):
self.elements = {}
for f in filelist:
self.elements[f] = 1
class ModFilter(_NameFilter):
""" A filter for Python modules.
ModFilter(modlist) where modlist is eg ['macpath', 'dospath'] """
def __init__(self, modlist):
self.elements = {}
for mod in modlist:
self.elements[mod] = 1
class DirFilter(_PathFilter):
""" A filter based on directories.
DirFilter(dirlist)
dirs may be relative and will be normalized.
Subdirectories of dirs will be excluded. """
def __init__(self, dirlist):
self.elements = {}
for pth in dirlist:
pth = os.path.normcase(os.path.abspath(pth))
self.elements[pth] = 1
class PkgFilter(_PathFilter):
"""At this time, identical to a DirFilter (being lazy) """
def __init__(self, pkglist):
#warning - pkgs are expected to be full directories
self.elements = {}
for pkg in pkglist:
pth = os.path.normcase(os.path.abspath(pkg))
self.elements[pth] = 1
class StdLibFilter(_PathFilter):
""" A filter that excludes anything found in the standard library """
def __init__(self):
pth = os.path.normcase(os.path.join(sys.exec_prefix, 'lib'))
self.elements = {pth:1}
class PatternFilter(_PatternFilter):
""" A filter that excludes if any pattern is found in resource's path """
def __init__(self, patterns):
self.elements = []
for pat in patterns:
self.elements.append(re.compile(pat))<|fim▁end|> | import sys
|
<|file_name|>pbs_testlib.py<|end_file_name|><|fim▁begin|># coding: utf-8
# Copyright (C) 1994-2016 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# This file is part of the PBS Professional ("PBS Pro") software.
#
# Open Source License Information:
#
# PBS Pro is free software. You can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# PBS Pro is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Commercial License Information:
#
# The PBS Pro software is licensed under the terms of the GNU Affero General
# Public License agreement ("AGPL"), except where a separate commercial license
# agreement for PBS Pro version 14 or later has been executed in writing with
# Altair.
#
# Altair's dual-license business model allows companies, individuals, and
# organizations to create proprietary derivative works of PBS Pro and
# distribute them - whether embedded or bundled with other software - under
# a commercial license agreement.
#
# Use of Altair's trademarks, including but not limited to "PBS™",
# "PBS Professional®", and "PBS Pro™" and Altair's logos is subject to Altair's
# trademark licensing policies.
import sys
import os
import socket
import pwd
import grp
import logging
import time
import re
import random
import string
import tempfile
import cPickle
import copy
import datetime
import traceback
import threading
from operator import itemgetter
from collections import OrderedDict
from distutils.version import LooseVersion
try:
import psycopg2
PSYCOPG = True
except:
PSYCOPG = False
try:
from ptl.lib.pbs_ifl import *
API_OK = True
except:
try:
from ptl.lib.pbs_ifl_mock import *
except:
sys.stderr.write("failed to import pbs_ifl, run pbs_swigify " +
"to make it\n")
raise ImportError
API_OK = False
from ptl.lib.pbs_api_to_cli import api_to_cli
from ptl.utils.pbs_dshutils import DshUtils
from ptl.utils.pbs_procutils import ProcUtils
from ptl.utils.pbs_cliutils import CliUtils
from ptl.utils.pbs_fileutils import FileUtils, FILE_TAIL
# suppress logging exceptions
logging.raiseExceptions = False
# Various mappings and aliases
MGR_OBJ_VNODE = MGR_OBJ_NODE
VNODE = MGR_OBJ_VNODE
NODE = MGR_OBJ_NODE
HOST = MGR_OBJ_HOST
JOB = MGR_OBJ_JOB
RESV = MGR_OBJ_RESV
SERVER = MGR_OBJ_SERVER
QUEUE = MGR_OBJ_QUEUE
SCHED = MGR_OBJ_SCHED
HOOK = MGR_OBJ_HOOK
RSC = MGR_OBJ_RSC
PBS_HOOK = MGR_OBJ_PBS_HOOK
# the order of these symbols matters, see pbs_ifl.h
(SET, UNSET, INCR, DECR, EQ, NE, GE, GT,
LE, LT, MATCH, MATCH_RE, NOT, DFLT) = range(14)
(PTL_OR, PTL_AND) = [0, 1]
(IFL_SUBMIT, IFL_SELECT, IFL_TERMINATE, IFL_ALTER,
IFL_MSG, IFL_DELETE) = [0, 1, 2, 3, 4, 5]
(PTL_API, PTL_CLI) = ['api', 'cli']
(PTL_COUNTER, PTL_FILTER) = [0, 1]
PTL_STR_TO_OP = {
'<': LT,
'<=': LE,
'=': EQ,
'>=': GE,
'>': GT,
'!=': NE,
' set ': SET,
' unset ': UNSET,
' match ': MATCH,
'~': MATCH_RE,
'!': NOT
}
PTL_OP_TO_STR = {
LT: '<',
LE: '<=',
EQ: '=',
GE: '>=',
GT: '>',
SET: ' set ',
NE: '!=',
UNSET: ' unset ',
MATCH: ' match ',
MATCH_RE: '~',
NOT: 'is not'
}
PTL_ATTROP_TO_STR = {PTL_AND: '&&', PTL_OR: '||'}
(RESOURCES_AVAILABLE, RESOURCES_TOTAL) = [0, 1]
EXPECT_MAP = {
UNSET: 'Unset',
SET: 'Set',
EQ: 'Equal',
NE: 'Not Equal',
LT: 'Less Than',
GT: 'Greater Than',
LE: 'Less Equal Than',
GE: 'Greater Equal Than',
MATCH_RE: 'Matches regexp',
MATCH: 'Matches',
NOT: 'Not'
}
PBS_CMD_MAP = {
MGR_CMD_CREATE: 'create',
MGR_CMD_SET: 'set',
MGR_CMD_DELETE: 'delete',
MGR_CMD_UNSET: 'unset',
MGR_CMD_IMPORT: 'import',
MGR_CMD_EXPORT: 'export',
MGR_CMD_LIST: 'list',
}
PBS_CMD_TO_OP = {
MGR_CMD_SET: SET,
MGR_CMD_UNSET: UNSET,
MGR_CMD_DELETE: UNSET,
MGR_CMD_CREATE: SET,
}
PBS_OBJ_MAP = {
MGR_OBJ_NONE: 'none',
SERVER: 'server',
QUEUE: 'queue',
JOB: 'job',
NODE: 'node',
RESV: 'reservation',
RSC: 'resource',
SCHED: 'sched',
HOST: 'host',
HOOK: 'hook',
VNODE: 'node',
PBS_HOOK: 'pbshook'
}
PTL_TRUE = ('1', 'true', 't', 'yes', 'y', 'enable', 'enabled', 'True', True)
PTL_FALSE = ('0', 'false', 'f', 'no', 'n', 'disable', 'disabled', 'False',
False)
PTL_NONE = ('None', None)
PTL_FORMULA = '__formula__'
PTL_NOARG = '__noarg__'
PTL_ALL = '__ALL__'
CMD_ERROR_MAP = {
'alterjob': 'PbsAlterError',
'holdjob': 'PbsHoldError',
'sigjob': 'PbsSignalError',
'msgjob': 'PbsMessageError',
'rlsjob': 'PbsReleaseError',
'rerunjob': 'PbsRerunError',
'orderjob': 'PbsOrderError',
'runjob': 'PbsRunError',
'movejob': 'PbsMoveError',
'delete': 'PbsDeleteError',
'deljob': 'PbsDeljobError',
'delresv': 'PbsDelresvError',
'status': 'PbsStatusError',
'manager': 'PbsManagerError',
'submit': 'PbsSubmitError',
'terminate': 'PbsQtermError'
}
class PtlConfig(object):
"""
Holds configuration options
The options can be stored in a file as well as in the OS environment
variables.When set, the environment variables will override
definitions in the file.By default, on Unix like systems, the file
read is ``/etc/ptl.conf``, the environment variable ``PTL_CONF_FILE``
can be used to set the path to the file to read.
The format of the file is a series of ``<key> = <value>`` properties.
A line that starts with a '#' is ignored and can be used for comments
:param conf: Path to PTL configuration file
:type conf: str or None
"""
logger = logging.getLogger(__name__)
def __init__(self, conf=None):
self.options = {
'PTL_SUDO_CMD': 'sudo -H',
'PTL_RSH_CMD': 'ssh',
'PTL_CP_CMD': 'scp -p',
'PTL_EXPECT_MAX_ATTEMPTS': 60,
'PTL_EXPECT_INTERVAL': 0.5,
'PTL_UPDATE_ATTRIBUTES': True,
}
self.handlers = {
'PTL_SUDO_CMD': DshUtils.set_sudo_cmd,
'PTL_RSH_CMD': DshUtils.set_rsh_cmd,
'PTL_CP_CMD': DshUtils.set_copy_cmd,
'PTL_EXPECT_MAX_ATTEMPTS': Server.set_expect_max_attempts,
'PTL_EXPECT_INTERVAL': Server.set_expect_interval,
'PTL_UPDATE_ATTRIBUTES': Server.set_update_attributes
}
if conf is None:
conf = os.environ.get('PTL_CONF_FILE', '/etc/ptl.conf')
try:
lines = open(conf).readlines()
except IOError:
lines = []
for line in lines:
line = line.strip()
if (line.startswith('#') or (line == '')):
continue
try:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
self.options[k] = v
except:
self.logger.error('Error parsing line ' + line)
for k, v in self.options.items():
if k in os.environ:
v = os.environ[k]
else:
os.environ[k] = str(v)
if k in self.handlers:
self.handlers[k](v)
class PtlException(Exception):
"""
Generic errors raised by PTL operations.
Sets a ``return value``, a ``return code``, and a ``message``
A post function and associated positional and named arguments
are available to perform any necessary cleanup.
:param rv: Return value set for the error occured during PTL
operation
:type rv: int or None.
:param rc: Return code set for the error occured during PTL
operation
:type rc: int or None.
:param msg: Message set for the error occured during PTL operation
:type msg: str or None.
:param post: Execute necessary cleanup if not None
:raises: PTL exceptions
"""
def __init__(self, rv=None, rc=None, msg=None, post=None, *args, **kwargs):
self.rv = rv
self.rc = rc
self.msg = msg
if post is not None:
post(*args, **kwargs)
def __str__(self):
return ('rc=' + str(self.rc) + ', rv=' + str(self.rv) +
', msg=' + str(self.msg))
def __repr__(self):
return (self.__class__.__name__ + '(rc=' + str(self.rc) + ', rv=' +
str(self.rv) + ', msg=' + str(self.msg) + ')')
class PbsServiceError(PtlException):
pass
class PbsConnectError(PtlException):
pass
class PbsStatusError(PtlException):
pass
class PbsSubmitError(PtlException):
pass
class PbsManagerError(PtlException):
pass
class PbsDeljobError(PtlException):
pass
class PbsDelresvError(PtlException):
pass
class PbsDeleteError(PtlException):
pass
class PbsRunError(PtlException):
pass
class PbsSignalError(PtlException):
pass
class PbsMessageError(PtlException):
pass
class PbsHoldError(PtlException):
pass
class PbsReleaseError(PtlException):
pass
class PbsOrderError(PtlException):
pass
class PbsRerunError(PtlException):
pass
class PbsMoveError(PtlException):
pass
class PbsAlterError(PtlException):
pass
class PbsResourceError(PtlException):
pass
class PbsSelectError(PtlException):
pass
class PbsSchedConfigError(PtlException):
pass
class PbsMomConfigError(PtlException):
pass
class PbsFairshareError(PtlException):
pass
class PbsQdisableError(PtlException):
pass
class PbsQenableError(PtlException):
pass
class PbsQstartError(PtlException):
pass
class PbsQstopError(PtlException):
pass
class PtlExpectError(PtlException):
pass
class PbsInitServicesError(PtlException):
pass
class PbsQtermError(PtlException):
pass
class PbsTypeSize(str):
"""
Descriptor class for memory as a numeric entity.
Units can be one of ``b``, ``kb``, ``mb``, ``gb``, ``tb``, ``pt``
:param unit: The unit type associated to the memory value
:type unit: str
:param value: The numeric value of the memory
:type value: int or None
:raises: ValueError and TypeError
"""
def __init__(self, value=None):
if value is None:
return
if len(value) < 2:
raise ValueError
if value[-1:] in ('b', 'B') and value[:-1].isdigit():
self.unit = 'b'
self.value = int(int(value[:-1]) / 1024)
return
# lower() applied to ignore case
unit = value[-2:].lower()
self.value = value[:-2]
if not self.value.isdigit():
raise ValueError
if unit == 'kb':
self.value = int(self.value)
elif unit == 'mb':
self.value = int(self.value) * 1024
elif unit == 'gb':
self.value = int(self.value) * 1024 * 1024
elif unit == 'tb':
self.value = int(self.value) * 1024 * 1024 * 1024
elif unit == 'pb':
self.value = int(self.value) * 1024 * 1024 * 1024 * 1024
else:
raise TypeError
self.unit = 'kb'
def encode(self, value=None, valtype='kb', precision=1):
"""
Encode numeric memory input in kilobytes to a string, including
unit
:param value: The numeric value of memory to encode
:type value: int or None.
:param valtype: The unit of the input value, defaults to kb
:type valtype: str
:param precision: Precision of the encoded value, defaults to 1
:type precision: int
:returns: Encoded memory in kb to string
"""
if value is None:
value = self.value
if valtype == 'b':
val = value
elif valtype == 'kb':
val = value * 1024
elif valtype == 'mb':
val = value * 1024 * 1024
        elif valtype == 'gb':
            val = value * 1024 * 1024 * 1024
        elif valtype == 'tb':
            val = value * 1024 * 1024 * 1024 * 1024
        elif valtype == 'pb':
            val = value * 1024 * 1024 * 1024 * 1024 * 1024
m = (
(1 << 50, 'pb'),
(1 << 40, 'tb'),
(1 << 30, 'gb'),
(1 << 20, 'mb'),
(1 << 10, 'kb'),
(1, 'b')
)
for factor, suffix in m:
if val >= factor:
break
return '%.*f%s' % (precision, float(val) / factor, suffix)
def __cmp__(self, other):
if self.value < other.value:
return -1
if self.value == other.value:
return 0
return 1
def __lt__(self, other):
if self.value < other.value:
return True
return False
def __le__(self, other):
if self.value <= other.value:
return True
return False
def __gt__(self, other):
if self.value > other.value:
return True
return False
def __ge__(self, other):
        if self.value >= other.value:
return True
return False
def __eq__(self, other):
if self.value == other.value:
return True
return False
def __get__(self):
return self.value
def __add__(self, other):
if isinstance(other, int):
self.value += other
else:
self.value += other.value
return self
def __mul__(self, other):
if isinstance(other, int):
self.value *= other
else:
self.value *= other.value
return self
def __floordiv__(self, other):
self.value /= other.value
return self
def __sub__(self, other):
self.value -= other.value
return self
def __repr__(self):
return self.__str__()
def __str__(self):
return self.encode(valtype=self.unit)
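# A minimal usage sketch for PbsTypeSize, assuming the class is used as
# defined above; the size string and the resulting values are illustrative:
#
#   sz = PbsTypeSize('2gb')
#   sz.value              # 2097152, i.e. the size normalized to kb
#   sz.unit               # 'kb'
#   str(sz)               # '2.0gb', re-encoded with one digit of precision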
class PbsTypeDuration(str):
"""
Descriptor class for a duration represented as ``hours``,
    ``minutes``, and ``seconds``, in the form of ``[HH:][MM:]SS``
:param as_seconds: HH:MM:SS represented in seconds
:type as_seconds: int
:param as_str: duration represented in HH:MM:SS
:type as_str: str
"""
def __init__(self, val):
if isinstance(val, str):
if ':' in val:
s = val.split(':')
l = len(s)
if l > 3:
raise ValueError
hr = mn = sc = 0
if l >= 2:
sc = s[l - 1]
mn = s[l - 2]
if l == 3:
hr = s[0]
self.duration = int(hr) * 3600 + int(mn) * 60 + int(sc)
elif val.isdigit():
self.duration = int(val)
elif isinstance(val, int) or isinstance(val, float):
self.duration = val
def __add__(self, other):
self.duration += other.duration
return self
def __sub__(self, other):
self.duration -= other.duration
return self
def __cmp__(self, other):
if self.duration < other.duration:
return -1
if self.duration == other.duration:
return 0
return 1
def __lt__(self, other):
if self.duration < other.duration:
return True
return False
def __le__(self, other):
if self.duration <= other.duration:
return True
return False
def __gt__(self, other):
if self.duration > other.duration:
return True
return False
def __ge__(self, other):
        if self.duration >= other.duration:
return True
return False
def __eq__(self, other):
if self.duration == other.duration:
return True
return False
def __get__(self):
return self.as_str
def __repr__(self):
return self.__str__()
def __int__(self):
return int(self.duration)
def __str__(self):
return str(datetime.timedelta(seconds=self.duration))
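# A short sketch of PbsTypeDuration, assuming it is constructed as above;
# durations are normalized to seconds and printed back as [H]H:MM:SS:
#
#   d = PbsTypeDuration('01:30:00')
#   int(d)                        # 5400
#   str(d)                        # '1:30:00'
#   int(PbsTypeDuration('90'))    # 90, plain digits are taken as seconds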
class PbsTypeArray(list):
"""
Descriptor class for a PBS array list type, e.g. String array
:param value: Array value to be passed
:param sep: Separator for two array elements
:type sep: str
:returns: List
"""
def __init__(self, value=None, sep=','):
self.separator = sep
self = list.__init__(self, value.split(sep))
def __str__(self):
return self.separator.join(self)
class PbsTypeList(dict):
"""
Descriptor class for a generic PBS list that are key/value pairs
delimited
:param value: List value to be passed
:param sep: Separator for two key/value pair
:type sep: str
:param kvsep: Separator for key and value
:type kvsep: str
:returns: Dictionary
"""
def __init__(self, value=None, sep=',', kvsep='='):
self.kvsep = kvsep
self.separator = sep
d = {}
as_list = map(lambda v: v.split(kvsep), value.split(sep))
if as_list:
for k, v in as_list:
d[k] = v
del as_list
dict.__init__(self, d)
def __str__(self):
s = []
for k, v in self.items():
s += [str(k) + self.kvsep + str(v)]
return self.separator.join(s)
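# Illustrative sketch of PbsTypeList parsing a comma-separated key=value
# string; keys and values below are made up:
#
#   pl = PbsTypeList('a=1,b=2')
#   pl['a'], pl['b']      # ('1', '2')
#   str(pl)               # 'a=1,b=2' (key order may vary)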
class PbsTypeLicenseCount(PbsTypeList):
"""
Descriptor class for a PBS license_count attribute.
It is a specialized list where key/values are ':' delimited, separated
by a ' ' (space)
:param value: PBS license_count attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeLicenseCount, self).__init__(value, sep=' ', kvsep=':')
class PbsTypeVariableList(PbsTypeList):
"""
Descriptor class for a PBS Variable_List attribute
It is a specialized list where key/values are '=' delimited, separated
    by a ',' (comma)
:param value: PBS Variable_List attribute value
:returns: Specialized list
"""
def __init__(self, value=None):
super(PbsTypeVariableList, self).__init__(value, sep=',', kvsep='=')
class PbsTypeSelect(list):
"""
Descriptor class for PBS select/schedselect specification.
Select is of the form:
``<select> ::= <m>":"<chunk> | <select>"+"<select>``
``<m> ::= <digit> | <digit><m>``
``<chunk> ::= <resc_name>":"<resc_value> | <chunk>":"<chunk>``
``<m>`` is a multiplying factor for each chunk requested
``<chunk>`` are resource key/value pairs
The type populates a list of single chunk of resource
``key/value`` pairs, the list can be walked by iterating over
the type itself.
:param num_chunks: The total number of chunks in the select
    :type num_chunks: int
:param resources: A dictionary of all resource counts in the select
:type resources: Dictionary
"""
def __init__(self, s=None):
if s is not None:
self._as_str = s
self.resources = {}
self.num_chunks = 0
nc = s.split('+')
for chunk in nc:
self._parse_chunk(chunk)
def _parse_chunk(self, chunk):
d = chunk.split(':')
# number of chunks
_num_chunks = int(d[0])
self.num_chunks += _num_chunks
r = {}
for e in d[1:]:
k, v = e.split('=')
r[k] = v
if 'mem' in k:
try:
v = PbsTypeSize(v).value
except:
# failed so we guessed wrong on the type
pass
if isinstance(v, int) or v.isdigit():
if k not in self.resources:
self.resources[k] = _num_chunks * int(v)
else:
self.resources[k] += _num_chunks * int(v)
else:
if k not in self.resources:
self.resources[k] = v
else:
self.resources[k] = [self.resources[k], v]
# explicitly expose the multiplying factor
for _ in range(_num_chunks):
self.append(r)
def __add__(self, chunk=None):
if chunk is None:
return self
self._parse_chunk(chunk)
self._as_str = self._as_str + "+" + chunk
return self
def __repr__(self):
return str(self)
def __str__(self):
return self._as_str
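# Illustrative sketch of PbsTypeSelect on a hypothetical select string; the
# per-chunk resources are expanded and summed as described above:
#
#   sel = PbsTypeSelect('2:ncpus=4:mem=2gb+1:ncpus=8')
#   sel.num_chunks        # 3
#   sel.resources         # {'ncpus': 16, 'mem': 4194304}  (mem normalized to kb)
#   len(sel)              # 3, one entry per expanded chunk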
class PbsTypeChunk(dict):
"""
Descriptor class for a PBS chunk associated to a
``PbsTypeExecVnode``.This type of chunk corresponds to
a node solution to a resource request,not to the select
specification.
``chunk ::= <subchk> | <chunk>"+"<chunk>``
``subchk ::= <node>":"<resource>``
``resource ::= <key>":"<val> | <resource>":"<resource>``
A chunk expresses a solution to a specific select-chunk
request. If multiple chunks are needed to solve a single
select-chunk, e.g., on a shared memory system, the chunk
will be extended into virtual chunk,vchunk.
:param vnode: the vnode name corresponding to the chunk
:type vnode: str or None
:param resources: the key value pair of resources in
dictionary form
:type resources: Dictionary or None
:param vchunk: a list of virtual chunks needed to solve
the select-chunk, vchunk is only set if more
than one vchunk are required to solve the
select-chunk
:type vchunk: list
"""
def __init__(self, vnode=None, resources=None, chunkstr=None):
self.vnode = vnode
if resources is not None:
self.resources = resources
else:
self.resources = {}
self.vchunk = []
self.as_str = chunkstr
self.__parse_chunk(chunkstr)
def __parse_chunk(self, chunkstr=None):
if chunkstr is None:
return
vchunks = chunkstr.split('+')
if len(vchunks) == 1:
entities = chunkstr.split(':')
self.vnode = entities[0]
if len(entities) > 1:
for e in entities[1:]:
(r, v) = e.split('=')
self.resources[r] = v
self[self.vnode] = self.resources
else:
for sc in vchunks:
chk = PbsTypeChunk(chunkstr=sc)
self.vchunk.append(chk)
self[chk.vnode] = chk.resources
def add(self, vnode, resources):
"""
        Add a chunk specification. If a chunk is already
defined, add the chunk as a vchunk.
:param vnode: The vnode to add
:type vnode: str
:param resources: The resources associated to the
vnode
:type resources: str
:returns: Added chunk specification
"""
if self.vnode == vnode:
self.resources = dict(self.resources.items() + resources.items())
return self
elif len(self.vchunk) != 0:
for chk in self.vchunk:
if chk.vnode == vnode:
chk.resources = dict(self.resources.items() +
resources.items())
return self
chk = PbsTypeChunk(vnode, resources)
self.vchunk.append(chk)
return self
def __repr__(self):
return self.__str__()
def __str__(self):
_s = ["("]
_s += [self.vnode, ":"]
for resc_k, resc_v in self.resources.items():
_s += [resc_k, "=", str(resc_v)]
if self.vchunk:
for _v in self.vchunk:
_s += ["+", _v.vnode, ":"]
for resc_k, resc_v in _v.resources.items():
_s += [resc_k, "=", str(resc_v)]
_s += [")"]
return "".join(_s)
class PbsTypeExecVnode(list):
"""
Execvnode representation, expressed as a list of
PbsTypeChunk
:param vchunk: List of virtual chunks, only set when
                   more than one vnode is allocated on a
                   host to satisfy a requested chunk
:type vchunk: List
:param num_chunks: The number of chunks satisfied by
this execvnode
:type num_chunks: int
:param vnodes: List of vnode names allocated to the execvnode
:type vnodes: List
:param resource: method to return the amount of a named
resource satisfied by this execvnode
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
start = 0
self.num_chunks = 0
for c in range(len(s)):
# must split on '+' between parens because '+' can occur within
# paren for complex specs
if s[c] == '(':
start = c + 1
if s[c] == ')':
self.append(PbsTypeChunk(chunkstr=s[start:c]))
self.num_chunks += 1
def resource(self, name=None):
"""
:param name: Name of the resource
:type name: str or None
"""
if name is None:
return None
_total = 0
for _c in self:
if _c.vchunk:
for _v in _c.vchunk:
if name in _v.resources:
_total += int(_v.resources[name])
if name in _c.resources:
if name in _c.resources:
_total += int(_c.resources[name])
return _total
@property
def vnodes(self):
vnodes = []
for e in self:
vnodes += [e.vnode]
if e.vchunk:
vnodes += map(lambda n: n.vnode, e.vchunk)
return list(set(vnodes))
    def __str__(self):
return self._as_str
# below would be to verify that the converted type maps back correctly
_s = []
for _c in self:
_s += [str(_c)]
return "+".join(_s)
class PbsTypeExecHost(str):
"""
Descriptor class for exec_host attribute
:param hosts: List of hosts in the exec_host. Each entry is
a host info dictionary that maps the number of
cpus and its task number
:type hosts: List
"""
def __init__(self, s=None):
if s is None:
return None
self._as_str = s
self.hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
self.hosts.append(hi)
def __repr__(self):
return str(self.hosts)
def __str__(self):
return self._as_str
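# Sketch of PbsTypeExecHost on a hypothetical exec_host string; each '+'
# separated entry becomes a per-host dictionary keyed by the host name:
#
#   eh = PbsTypeExecHost('hostA/0*2+hostB/1')
#   eh.hosts              # [{'hostA': {'task': '0', 'ncpus': '2'}},
#                         #  {'hostB': {'task': '1', 'ncpus': 1}}]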
class PbsTypeJobId(str):
"""
Descriptor class for a Job identifier
:param id: The numeric portion of a job identifier
:type id: int
:param server_name: The pbs server name
:type server_name: str
:param server_shortname: The first portion of a FQDN server
name
:type server_shortname: str
"""
def __init__(self, value=None):
if value is None:
return
self.value = value
r = value.split('.', 1)
if len(r) != 2:
return
self.id = int(r[0])
self.server_name = r[1]
self.server_shortname = r[1].split('.', 1)[0]
def __str__(self):
return str(self.value)
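# Minimal sketch of PbsTypeJobId splitting a job identifier; the server name
# used here is fabricated:
#
#   jid = PbsTypeJobId('123.server01.example.com')
#   jid.id                 # 123
#   jid.server_name        # 'server01.example.com'
#   jid.server_shortname   # 'server01'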
class PbsUser(object):
"""
The PbsUser type augments a PBS username to associate
it to groups to which the user belongs
:param name: The user name referenced
:type name: str
:param uid: uid of user
:type uid: int or None
:param groups: The list of PbsGroup objects the user
belongs to
:type groups: List or None
"""
def __init__(self, name, uid=None, groups=None):
self.name = name
if uid is not None:
self.uid = int(uid)
else:
self.uid = None
self.home = None
self.gid = None
self.shell = None
self.gecos = None
try:
_user = pwd.getpwnam(self.name)
self.uid = _user.pw_uid
self.home = _user.pw_dir
self.gid = _user.pw_gid
self.shell = _user.pw_shell
self.gecos = _user.pw_gecos
except:
pass
if groups is None:
self.groups = []
elif isinstance(groups, list):
self.groups = groups
else:
self.groups = groups.split(",")
for g in self.groups:
if isinstance(g, str):
self.groups.append(PbsGroup(g, users=[self]))
elif self not in g.users:
g.users.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.uid)
class PbsGroup(object):
"""
The PbsGroup type augments a PBS groupname to associate it
to users to which the group belongs
:param name: The group name referenced
:type name: str
:param gid: gid of group
:type gid: int or None
:param users: The list of PbsUser objects the group belongs to
:type users: List or None
"""
def __init__(self, name, gid=None, users=None):
self.name = name
if gid is not None:
self.gid = int(gid)
else:
self.gid = None
try:
_group = grp.getgrnam(self.name)
self.gid = _group.gr_gid
except:
pass
if users is None:
self.users = []
elif isinstance(users, list):
self.users = users
else:
self.users = users.split(",")
for u in self.users:
if isinstance(u, str):
self.users.append(PbsUser(u, groups=[self]))
elif self not in u.groups:
u.groups.append(self)
def __repr__(self):
return str(self.name)
def __str__(self):
return self.__repr__()
def __int__(self):
return int(self.gid)
class BatchUtils(object):
"""
Utility class to create/convert/display various PBS
data structures
"""
legal = "\d\w:\+=\[\]~"
chunks_tag = re.compile("(?P<chunk>\([\d\w:\+=\[\]~]\)[\+]?)")
chunk_tag = re.compile("(?P<vnode>[\w\d\[\]]+):" +
"(?P<resources>[\d\w:\+=\[\]~])+\)")
array_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]*)\]*" +
"[.]*[(?P<server>.*)]*")
subjob_tag = re.compile("(?P<jobid>[\d]+)\[(?P<subjobid>[0-9]+)\]*" +
"[.]*[(?P<server>.*)]*")
pbsobjname_re = re.compile("^([\w\d][\d\w\s]*:?[\s]+)" +
"*(?P<name>[\w@\.\d\[\]-]+)$")
pbsobjattrval_re = re.compile(r"""
[\s]*(?P<attribute>[\w\d\.-]+)
[\s]*=[\s]*
(?P<value>.*)
[\s]*""",
re.VERBOSE)
dt_re = '(?P<dt_from>\d\d/\d\d/\d\d\d\d \d\d:\d\d)' + \
'[\s]+' + \
'(?P<dt_to>\d\d/\d\d/\d\d\d\d \d\d:\d\d)'
dt_tag = re.compile(dt_re)
hms_tag = re.compile('(?P<hr>\d\d):(?P<mn>\d\d):(?P<sc>\d\d)')
lim_tag = re.compile("(?P<limtype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)"
"=[\s]*\[(?P<entity_type>[ugpo]):"
"(?P<entity_name>[\w\d-]+)"
"=(?P<entity_value>[\d\w]+)\][\s]*")
def __init__(self):
self.logger = logging.getLogger(__name__)
self.du = DshUtils()
def list_to_attrl(self, l):
"""
Convert a list to a PBS attribute list
:param l: List to be converted
:type l: List
:returns: PBS attribute list
"""
return self.list_to_attropl(l, None)
def list_to_attropl(self, l, op=SET):
"""
Convert a list to a PBS attribute operation list
:param l: List to be converted
:type l: List
:returns: PBS attribute operation list
"""
head = None
prev = None
for i in l:
a = self.str_to_attropl(i, op)
if prev is None:
head = a
else:
prev.next = a
prev = a
if op is not None:
a.op = op
return head
def str_to_attrl(self, s):
"""
Convert a string to a PBS attribute list
:param s: String to be converted
:type s: str
:returns: PBS attribute list
"""
return self.str_to_attropl(s, None)
def str_to_attropl(self, s, op=SET):
"""
Convert a string to a PBS attribute operation list
:param s: String to be converted
:type s: str
:returns: PBS attribute operation list
"""
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in s:
(attribute, resource) = s.split('.')
a.name = attribute
a.resource = resource.strip()
else:
a.name = s
a.value = ''
a.next = None
if op:
a.op = op
return a
def dict_to_attrl(self, d={}):
"""
Convert a dictionary to a PBS attribute list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute list
"""
return self.dict_to_attropl(d, None)
def dict_to_attropl(self, d={}, op=SET):
"""
Convert a dictionary to a PBS attribute operation list
:param d: Dictionary to be converted
:type d: Dictionary
:returns: PBS attribute operation list
"""
if len(d.keys()) == 0:
return None
prev = None
head = None
for k, v in d.items():
if isinstance(v, tuple):
op = v[0]
v = v[1]
if op is not None:
a = attropl()
else:
a = attrl()
if '.' in k:
(attribute, resource) = k.split('.')
a.name = attribute
a.resource = resource
else:
a.name = k
a.value = str(v)
if op is not None:
a.op = op
a.next = None
if prev is None:
head = a
else:
prev.next = a
prev = a
return head
def convert_to_attrl(self, attrib):
"""
Generic call to convert Python type to PBS attribute list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute list
"""
return self.convert_to_attropl(attrib, None)
def convert_to_attropl(self, attrib, cmd=MGR_CMD_SET, op=None):
"""
Generic call to convert Python type to PBS attribute
operation list
:param attrib: Attributes to be converted
:type attrib: List or tuple or dictionary or str
:returns: PBS attribute operation list
"""
if op is None:
op = self.command_to_op(cmd)
if isinstance(attrib, (list, tuple)):
a = self.list_to_attropl(attrib, op)
elif isinstance(attrib, (dict, OrderedDict)):
a = self.dict_to_attropl(attrib, op)
elif isinstance(attrib, str):
a = self.str_to_attropl(attrib, op)
else:
a = None
return a
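    # A hedged sketch of convert_to_attropl; it returns the head of a linked
    # attropl list suitable for the IFL API (this assumes the swig-wrapped
    # pbs_ifl module is importable so that attropl objects can be created):
    #
    #   bu = BatchUtils()
    #   head = bu.convert_to_attropl({'Resource_List.ncpus': '2'}, MGR_CMD_SET)
    #   head.name, head.resource, head.value   # ('Resource_List', 'ncpus', '2')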
def command_to_op(self, cmd=None):
"""
Map command to a ``SET`` or ``UNSET`` Operation. An unrecognized
command will return SET. No command will return None.
:param cmd: Command to be mapped
:type cmd: str
:returns: ``SET`` or ``UNSET`` operation for the command
"""
if cmd is None:
return None
if cmd in (MGR_CMD_SET, MGR_CMD_EXPORT, MGR_CMD_IMPORT):
return SET
if cmd == MGR_CMD_UNSET:
return UNSET
return SET
def display_attrl(self, a=None, writer=sys.stdout):
"""
Display an attribute list using writer, defaults to sys.stdout
:param a: Attributes
:type a: List
:returns: Displays attribute list
"""
return self.display_attropl(a)
def display_attropl(self, attropl=None, writer=sys.stdout):
"""
Display an attribute operation list with writer, defaults to
sys.stdout
:param attropl: Attribute operation list
:type attropl: List
:returns: Displays an attribute operation list
"""
attrs = attropl
while attrs is not None:
if attrs.resource:
writer.write('\t' + attrs.name + '.' + attrs.resource + '= ' +
attrs.value + '\n')
else:
writer.write('\t' + attrs.name + '= ' + attrs.value + '\n')
attrs = attrs.next
def display_dict(self, d, writer=sys.stdout):
"""
Display a dictionary using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:returns: Displays a dictionary
"""
if not d:
return
for k, v in d.items():
writer.write(k + ': ' + v + '\n')
def batch_status_to_dictlist(self, bs=None, attr_names=None, id=None):
"""
Convert a batch status to a list of dictionaries.
version 0.1a6 added this conversion as a typemap(out) as
part of the swig wrapping itself so there are fewer uses
        for this function. Returns a list-of-dictionaries
        representation of the batch status
:param bs: Batch status
:param attr_names: Attribute names
:returns: List of dictionaries
"""
attr_time = (
'ctime', 'mtime', 'qtime', 'start', 'end', 'reserve_start',
'reserve_end', 'estimated.start_time')
ret = []
while bs:
if id is not None and bs.name != id:
bs = bs.next
continue
d = {}
attrs = bs.attribs
while attrs is not None:
if attrs.resource:
key = attrs.name + '.' + attrs.resource
else:
key = attrs.name
if attr_names is not None:
if key not in attr_names:
attrs = attrs.next
continue
val = attrs.value
if attrs.name in attr_time:
val = self.convert_time(val)
# for attributes that may occur multiple times (e.g., max_run)
# append the value in a comma-separated representation
if key in d:
d[key] = d[key] + ',' + str(val)
else:
d[key] = str(val)
attrs = attrs.next
if len(d.keys()) > 0:
ret.append(d)
d['id'] = bs.name
bs = bs.next
return ret
def display_batch_status(self, bs=None, attr_names=None,
writer=sys.stdout):
"""
Display a batch status using writer, defaults to sys.stdout
:param bs: Batch status
:param attr_name: Attribute name
:type attr_name: str
:returns: Displays batch status
"""
if bs is None:
return
l = self.batch_status_to_dictlist(bs, attr_names)
self.display_batch_status_as_dictlist(l, writer)
def display_dictlist(self, l=[], writer=sys.stdout, fmt=None):
"""
Display a list of dictionaries using writer, defaults to
sys.stdout
:param l: The list to display
:type l: List
:param writer: The stream on which to write
:param fmt: An optional formatting string
:type fmt: str or None
:returns: Displays list of dictionaries
"""
self.display_batch_status_as_dictlist(l, writer, fmt)
def dictlist_to_file(self, l=[], filename=None, mode='w'):
"""
write a dictlist to file
:param l: Dictlist
:type l: List
:param filename: File to which dictlist need to be written
:type filename: str
:param mode: Mode of file
:type mode: str
:raises: Exception writing to file
"""
if filename is None:
self.logger.error('a filename is required')
return
d = os.path.dirname(filename)
if d != '' and not os.path.isdir(d):
os.makedirs(d)
try:
f = open(filename, mode)
self.display_dictlist(l, f)
f.close()
except:
self.logger.error('error writing to file ' + filename)
raise
def batch_status_as_dictlist_to_file(self, l=[], writer=sys.stdout):
"""
Write a dictlist to file
:param l: Dictlist
:type l: List
:raises: Exception writing to file
"""
return self.dictlist_to_file(l, writer)
def file_to_dictlist(self, file=None, attribs=None, id=None):
"""
Convert a file to a batch dictlist format
:param file: File to be converted
:type file: str
:param attribs: Attributes
:returns: File converted to a batch dictlist format
"""
if file is None:
return []
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except Exception, e:
self.logger.error('error converting list of dictionaries to ' +
'file ' + str(e))
return []
return self.convert_to_dictlist(lines, attribs, id=id)
def file_to_vnodedef(self, file=None):
"""
Convert a file output of pbsnodes -av to a vnode
definition format
:param file: File to be converted
        :type file: str
:returns: Vnode definition format
"""
if file is None:
return None
try:
f = open(file, 'r')
lines = f.readlines()
f.close()
except:
self.logger.error('error converting nodes to vnode def')
return None
dl = self.convert_to_dictlist(lines)
return self.dictlist_to_vnodedef(dl)
def show(self, l=[], name=None, fmt=None):
"""
Alias to display_dictlist with sys.stdout as writer
:param name: if specified only show the object of
that name
:type name: str
:param fmt: Optional formatting string, uses %n for
object name, %a for attributes, for example
a format of '%nE{\}nE{\}t%aE{\}n' will display
objects with their name starting on the first
column, a new line, and attributes indented by
a tab followed by a new line at the end.
:type fmt: str
"""
if name:
i = 0
for obj in l:
if obj['id'] == name:
l = [l[i]]
break
i += 1
self.display_dictlist(l, fmt=fmt)
def get_objtype(self, d={}):
"""
Get the type of a given object
:param d: Dictionary
:type d: Dictionary
:Returns: Type of the object
"""
if 'Job_Name' in d:
return JOB
elif 'queue_type' in d:
return QUEUE
elif 'Reserve_Name' in d:
return RESV
elif 'server_state' in d:
return SERVER
elif 'Mom' in d:
return NODE
elif 'event' in d:
return HOOK
elif 'type' in d:
return RSC
return None
def display_batch_status_as_dictlist(self, l=[], writer=sys.stdout,
fmt=None):
"""
Display a batch status as a list of dictionaries
using writer, defaults to sys.stdout
:param l: List
:type l: List
:param fmt: - Optional format string
:type fmt: str or None
:returns: Displays batch status as a list of dictionaries
"""
if l is None:
return
for d in l:
self.display_batch_status_as_dict(d, writer, fmt)
def batch_status_as_dict_to_str(self, d={}, fmt=None):
"""
Return a string representation of a batch status dictionary
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
:type fmt: str or None
:returns: String representation of a batch status dictionary
"""
objtype = self.get_objtype(d)
if fmt is not None:
if '%1' in fmt:
_d1 = fmt['%1']
else:
_d1 = '\n'
if '%2' in fmt:
_d2 = fmt['%2']
else:
_d2 = ' '
if '%3' in fmt:
_d3 = fmt['%3']
else:
_d3 = ' = '
if '%4' in fmt:
_d4 = fmt['%4']
else:
_d4 = '\n'
if '%5' in fmt:
_d5 = fmt['%5']
else:
_d5 = '\n'
if '%6' in fmt:
_d6 = fmt['%6']
else:
_d6 = ''
else:
_d1 = '\n'
_d2 = ' '
_d3 = ' = '
_d4 = '\n'
_d5 = '\n'
_d6 = ''
if objtype == JOB:
_n = 'Job Id: ' + d['id'] + _d1
elif objtype == QUEUE:
_n = 'Queue: ' + d['id'] + _d1
elif objtype == RESV:
_n = 'Name: ' + d['id'] + _d1
elif objtype == SERVER:
_n = 'Server: ' + d['id'] + _d1
elif objtype == RSC:
_n = 'Resource: ' + d['id'] + _d1
elif 'id' in d:
_n = d['id'] + _d1
del d['id']
else:
_n = ''
_a = []
for k, v in sorted(d.items()):
if k == 'id':
continue
_a += [_d2 + k + _d3 + str(v)]
return _n + _d4.join(_a) + _d5 + _d6
def display_batch_status_as_dict(self, d={}, writer=sys.stdout, fmt=None):
"""
Display a dictionary representation of a batch status
using writer, defaults to sys.stdout
:param d: Dictionary
:type d: Dictionary
:param fmt: Optional format string
:param fmt: str
:returns: Displays dictionary representation of a batch
status
"""
writer.write(self.batch_status_as_dict_to_str(d, fmt))
def decode_dictlist(self, l=None, json=True):
"""
decode a list of dictionaries
:param l: List of dictionaries
:type l: List
:param json: The target of the decode is meant for ``JSON``
formatting
:returns: Decoded list of dictionaries
"""
if l is None:
return ''
_js = []
for d in l:
_jdict = {}
for k, v in d.items():
if ',' in v:
_jdict[k] = v.split(',')
else:
_jdict[k] = self.decode_value(v)
_js.append(_jdict)
return _js
def convert_to_dictlist(self, l, attribs=None, mergelines=True, id=None):
"""
Convert a list of records into a dictlist format.
:param l: array of records to convert
:type l: List
:param mergelines: merge qstat broken lines into one
:returns: Record list converted into dictlist format
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\n\t') + \
l[i].strip('\r\n\t')
else:
lines.append(l[i])
else:
lines = l
objlist = []
d = {}
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
d = {}
d['id'] = m.group('name')
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = m.group('attribute')
if attribs is None or attr in attribs:
if attr in d:
d[attr] = d[attr] + "," + m.group('value')
else:
d[attr] = m.group('value')
# add the last element
if len(d.keys()) > 1:
if id is None or (id is not None and d['id'] == id):
objlist.append(d.copy())
return objlist
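    # A hedged example of convert_to_dictlist on qstat-style output lines; the
    # job id and attribute values below are fabricated:
    #
    #   lines = ['Job Id: 1.svr', '    Job_Name = STDIN', '    ctime = 10']
    #   BatchUtils().convert_to_dictlist(lines)
    #   # -> [{'id': '1.svr', 'Job_Name': 'STDIN', 'ctime': '10'}]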
def convert_to_batch(self, l, mergelines=True):
"""
Convert a list of records into a batch format.
:param l: array of records to convert
:type l: List
:param mergelines: qstat breaks long lines over
                           multiple lines, merge them
                           into one by default.
:type mergelines: bool
:returns: A linked list of batch status
"""
if mergelines:
lines = []
for i in range(len(l)):
if l[i].startswith('\t'):
_e = len(lines) - 1
lines[_e] = lines[_e].strip('\r\t') + \
l[i].strip('\r\n')
else:
lines.append(l[i])
else:
lines = l
head_bs = None
prev_bs = None
prev_attr = None
for l in lines:
l = l.strip()
m = self.pbsobjname_re.match(l)
if m:
bs = batch_status()
bs.name = m.group('name')
bs.attribs = None
bs.next = None
if prev_bs:
prev_bs.next = bs
if head_bs is None:
head_bs = bs
prev_bs = bs
prev_attr = None
else:
m = self.pbsobjattrval_re.match(l)
if m:
attr = attrl()
attr.name = m.group('attribute')
attr.value = m.group('value')
attr.next = None
if bs.attribs is None:
bs.attribs = attr
if prev_attr:
prev_attr.next = attr
prev_attr = attr
return head_bs
def file_to_batch(self, file=None):
"""
Convert a file to batch format
:param file: File to be converted
:type file: str or None
:returns: File converted into batch format
"""
if file is None:
return None
try:
f = open(file, 'r')
l = f.readlines()
f.close()
except:
self.logger.error('error converting file ' + file + ' to batch')
return None
return self.convert_to_batch(l)
def batch_to_file(self, bs=None, file=None):
"""
Write a batch object to file
:param bs: Batch status
:param file: File to which batch object is to be written
:type file: str
"""
if bs is None or file is None:
return
try:
f = open(file, 'w')
self.display_batch_status(bs, writer=f)
f.close()
except:
self.logger.error('error converting batch status to file')
def batch_to_vnodedef(self, bs):
"""
:param bs: Batch status
:returns: The vnode definition string representation
of nodes batch_status
"""
out = ["$configversion 2\n"]
while bs is not None:
attr = bs.attribs
while attr is not None:
if attr.name.startswith("resources_available") or \
attr.name.startswith("sharing"):
out += [bs.name + ": "]
out += [attr.name + "=" + attr.value + "\n"]
attr = attr.next
bs = bs.next
return "".join(out)
def dictlist_to_vnodedef(self, dl=None):
"""
:param dl: Dictionary list
:type dl: List
:returns: The vnode definition string representation
of a dictlist
"""
if dl is None:
return ''
out = ["$configversion 2\n"]
for node in dl:
for k, v in node.items():
if (k.startswith("resources_available") or
k.startswith("sharing") or
k.startswith("provision_enable") or
k.startswith("queue")):
out += [node['id'] + ": "]
# MoM dislikes empty values reported in vnode defs so
# we substitute no value for an actual empty string
if not v:
v = '""'
out += [k + "=" + str(v) + "\n"]
return "".join(out)
def objlist_to_dictlist(self, objlist=None):
"""
Convert a list of PBS/PTL objects ``(e.g. Server/Job...)``
into a dictionary list representation of the batch status
:param objlist: List of ``PBS/PTL`` objects
:type objlist: List
:returns: Dictionary list representation of the batch status
"""
if objlist is None:
return None
bsdlist = []
for obj in objlist:
newobj = self.obj_to_dict(obj)
bsdlist.append(newobj)
return bsdlist
def obj_to_dict(self, obj):
"""
Convert a PBS/PTL object (e.g. Server/Job...) into a
dictionary format
:param obj: ``PBS/PTL`` object
:returns: Dictionary of ``PBS/PTL`` objects
"""
newobj = dict(obj.attributes.items())
        newobj['id'] = obj.name
return newobj
def parse_execvnode(self, s=None):
"""
Parse an execvnode string into chunk objects
:param s: Execvnode string
:type s: str or None
:returns: Chunk objects for parsed execvnode string
"""
if s is None:
return None
chunks = []
start = 0
for c in range(len(s)):
if s[c] == '(':
start = c + 1
if s[c] == ')':
chunks.append(PbsTypeChunk(chunkstr=s[start:c]).info)
return chunks
def anupbs_exechost_numhosts(self, s=None):
"""
:param s: Exechost string
:type s: str or None
"""
n = 0
if '[' in s:
eh = re.sub(r'.*\[(.*)\].*', r'\1', s)
hosts = eh.split(',')
for hid in hosts:
elm = hid.split('-')
if len(elm) == 2:
n += int(elm[1]) - int(elm[0]) + 1
else:
n += 1
else:
n += 1
return n
def parse_exechost(self, s=None):
"""
Parse an exechost string into a dictionary representation
:param s: String to be parsed
:type s: str or None
:returns: Dictionary format of the exechost string
"""
if s is None:
return None
hosts = []
hsts = s.split('+')
for h in hsts:
hi = {}
ti = {}
(host, task) = h.split('/',)
d = task.split('*')
if len(d) == 1:
taskslot = d[0]
ncpus = 1
elif len(d) == 2:
(taskslot, ncpus) = d
else:
(taskslot, ncpus) = (0, 1)
ti['task'] = taskslot
ti['ncpus'] = ncpus
hi[host] = ti
hosts.append(hi)
return hosts
def parse_select(self, s=None):
"""
Parse a ``select/schedselect`` string into a list
of dictionaries.
:param s: select/schedselect string
:type s: str or None
:returns: List of dictonaries
"""
if s is None:
return
info = []
chunks = s.split('+')
for chunk in chunks:
d = chunk.split(':')
numchunks = int(d[0])
resources = {}
for e in d[1:]:
k, v = e.split('=')
resources[k] = v
for _ in range(numchunks):
info.append(resources)
return info
@classmethod
def isfloat(cls, value):
"""
returns true if value is a float or a string representation
of a float returns false otherwise
:param value: value to be checked
:type value: str or int or float
:returns: True or False
"""
if isinstance(value, float):
return True
if isinstance(value, str):
try:
float(value)
return True
except ValueError:
return False
@classmethod
def decode_value(cls, value):
"""
Decode an attribute/resource value, if a value is
made up of digits only then return the numeric value
of it, if it is made of alphanumeric values only, return
it as a string, if it is of type size, i.e., with a memory
unit such as b,kb,mb,gb then return the converted size to
kb without the unit
:param value: attribute/resource value
:type value: str or int
:returns: int or float or string
"""
if value is None or callable(value):
return value
if isinstance(value, (int, float)):
return value
if value.isdigit():
return int(value)
if value.isalpha() or value == '':
return value
if cls.isfloat(value):
return float(value)
if ':' in value:
try:
value = int(PbsTypeDuration(value))
except ValueError:
pass
return value
# TODO revisit: assume (this could be the wrong type, need a real
# data model anyway) that the remaining is a memory expression
try:
value = PbsTypeSize(value)
return value.value
except ValueError:
pass
except TypeError:
# if not then we pass to return the value as is
pass
return value
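    # Short sketch of decode_value behaviour, assuming the memory and duration
    # forms described in the docstring:
    #
    #   BatchUtils.decode_value('8')          # 8
    #   BatchUtils.decode_value('0.5')        # 0.5
    #   BatchUtils.decode_value('00:30:00')   # 1800
    #   BatchUtils.decode_value('1gb')        # 1048576  (kb)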
def convert_time(self, val, fmt='%a %b %d %H:%M:%S %Y'):
"""
Convert a date time format into number of seconds
since epoch
:param val: date time value
:param fmt: date time format
:type fmt: str
:returns: seconds
"""
# Tweak for NAS format that puts the number of seconds since epoch
# in between
if val.split()[0].isdigit():
val = int(val.split()[0])
elif not val.isdigit():
val = time.strptime(val, fmt)
val = int(time.mktime(val))
return val
def convert_duration(self, val):
"""
Convert HH:MM:SS into number of seconds
If a number is fed in, that number is returned
If neither formatted data is fed in, returns 0
:param val: duration value
:type val: str
:raises: Incorrect format error
:returns: seconds
"""
if val.isdigit():
return int(val)
hhmmss = val.split(':')
if len(hhmmss) != 3:
self.logger.error('Incorrect format, expected HH:MM:SS')
return 0
return int(hhmmss[0]) * 3600 + int(hhmmss[1]) * 60 + int(hhmmss[2])
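    # Example of convert_duration, matching the HH:MM:SS contract above:
    #
    #   BatchUtils().convert_duration('02:00:30')   # 7230
    #   BatchUtils().convert_duration('45')         # 45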
def convert_seconds_to_resvtime(self, tm, fmt=None, seconds=True):
"""
        Convert a time given in seconds since epoch to a reservation
        time format string
:param tm: the time to convert
:type tm: str
:param fmt: optional format string. If used, the seconds
                    parameter is ignored. Defaults to ``%Y%m%d%H%M``
:type fmt: str or None
:param seconds: if True, convert time with seconds
granularity. Defaults to True.
:type seconds: bool
        :returns: Formatted time string
"""
if fmt is None:
fmt = "%Y%m%d%H%M"
if seconds:
fmt += ".%S"
return time.strftime(fmt, time.localtime(int(tm)))
def convert_stime_to_seconds(self, st):
"""
Convert a time to seconds, if we fail we return the
original time
:param st: Time to be converted
:type st: str
:returns: Number of seconds
"""
try:
ret = time.mktime(time.strptime(st, '%a %b %d %H:%M:%S %Y'))
except:
ret = st
return ret
def convert_dedtime(self, dtime):
"""
Convert dedicated time string of form %m/%d/%Y %H:%M.
:param dtime: A datetime string, as an entry in the
dedicated_time file
:type dtime: str
:returns: A tuple of (from,to) of time since epoch
"""
dtime_from = None
dtime_to = None
m = self.dt_tag.match(dtime.strip())
if m:
try:
_f = "%m/%d/%Y %H:%M"
dtime_from = self.convert_datetime_to_epoch(m.group('dt_from'),
fmt=_f)
dtime_to = self.convert_datetime_to_epoch(m.group('dt_to'),
fmt=_f)
except:
self.logger.error('error converting dedicated time')
return (dtime_from, dtime_to)
def convert_datetime_to_epoch(self, mdyhms, fmt="%m/%d/%Y %H:%M:%S"):
"""
Convert the date time to epoch
:param mdyhms: date time
:type mdyhms: str
:param fmt: Format for date time
:type fmt: str
:returns: Epoch time
"""
return int(time.mktime(time.strptime(mdyhms, fmt)))
def compare_versions(self, v1, v2, op=None):
"""
Compare v1 to v2 with respect to operation op
:param v1: If not a looseversion, it gets converted
to it
:param v2: If not a looseversion, it gets converted
to it
:param op: An operation, one of ``LT``, ``LE``, ``EQ``,
``GE``, ``GT``
:type op: str
:returns: True or False
"""
if op is None:
self.logger.error('missing operator, one of LT,LE,EQ,GE,GT')
return None
if v1 is None or v2 is None:
return False
if isinstance(v1, str):
v1 = LooseVersion(v1)
if isinstance(v2, str):
v2 = LooseVersion(v2)
if op == GT:
if v1 > v2:
return True
elif op == GE:
if v1 >= v2:
return True
elif op == EQ:
if v1 == v2:
return True
elif op == LT:
if v1 < v2:
return True
elif op == LE:
if v1 <= v2:
return True
return False
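    # Minimal sketch of compare_versions using the module-level operator
    # constants defined earlier in this file:
    #
    #   bu = BatchUtils()
    #   bu.compare_versions('14.1.0', '13.0.2', op=GT)   # True
    #   bu.compare_versions('13.0', '13.0', op=EQ)       # True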
def convert_arglist(self, attr):
"""
strip the XML attributes from the argument list attribute
:param attr: Argument list attributes
:type attr: List
:returns: Stripped XML attributes
"""
xmls = "<jsdl-hpcpa:Argument>"
xmle = "</jsdl-hpcpa:Argument>"
nattr = attr.replace(xmls, " ")
nattr = nattr.replace(xmle, " ")
return nattr.strip()
def convert_to_cli(self, attrs, op=None, hostname=None, dflt_conf=True,
exclude_attrs=None):
"""
Convert attributes into their CLI format counterpart. This
method is far from complete, it grows as needs come by and
could use a rewrite, especially going along with a rewrite
of pbs_api_to_cli
:param attrs: Attributes to convert
:type attrs: List or str or dictionary
:param op: The qualifier of the operation being performed,
such as ``IFL_SUBMIT``, ``IFL_DELETE``,
                   ``IFL_TERMINATE``...
:type op: str or None
:param hostname: The name of the host on which to operate
:type hostname: str or None
:param dflt_conf: Whether we are using the default PBS
configuration
:type dflt_conf: bool
:param exclude_attrs: Optional list of attributes to not
convert
:type exclude_attrs: List
:returns: CLI format of attributes
"""
ret = []
if op == IFL_SUBMIT:
executable = arglist = None
elif op == IFL_DELETE:
_c = []
if isinstance(attrs, str):
attrs = [attrs]
if isinstance(attrs, list):
for a in attrs:
if 'force' in a:
_c.append('-W')
_c.append('force')
if 'deletehist' in a:
_c.append('-x')
return _c
elif op == IFL_TERMINATE:
_c = []
if attrs is None:
_c = []
elif isinstance(attrs, str):
_c = ['-t', attrs]
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
_c = ['-t', 'quick']
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
_c = ['-t', 'immediate']
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
_c = ['-t', 'delay']
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
_c.append('-s')
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
_c.append('-m')
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
_c.append('-f')
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
_c.append('-F')
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
_c.append('-i')
return _c
if attrs is None or len(attrs) == 0:
return ret
# if a list, convert to a dictionary to fall into a single processing
# of the attributes
if (isinstance(attrs, list) and len(attrs) > 0 and
not isinstance(attrs[0], tuple)):
tmp_attrs = {}
for each_attr in attrs:
tmp_attrs[each_attr] = ''
del attrs
attrs = tmp_attrs
del tmp_attrs
if isinstance(attrs, (dict, OrderedDict)):
attrs = attrs.items()
for a, v in attrs:
if exclude_attrs is not None and a in exclude_attrs:
continue
if op == IFL_SUBMIT:
if a == ATTR_executable:
executable = v
continue
if a == ATTR_Arglist:
if v is not None:
arglist = self.convert_arglist(v)
if len(arglist) == 0:
return []
continue
if isinstance(v, list):
v = ','.join(v)
# when issuing remote commands, escape spaces in attribute values
if (((hostname is not None) and
(not self.du.is_localhost(hostname))) or
(not dflt_conf)):
if ' ' in str(v):
v = '"' + v + '"'
if '.' in a:
(attribute, resource) = a.split('.')
ret.append('-' + api_to_cli[attribute])
rv = resource
if v is not None:
rv += '=' + str(v)
ret.append(rv)
else:
try:
val = api_to_cli[a]
except KeyError:
self.logger.error('error retrieving key ' + str(a))
# for unknown or junk options
ret.append(a)
if v is not None:
ret.append(str(v))
continue
# on a remote job submit append the remote server name
# to the queue name
if ((op == IFL_SUBMIT) and (hostname is not None)):
if ((not self.du.is_localhost(hostname)) and
(val == 'q') and (v is not None) and
('@' not in v) and (v != '')):
v += '@' + hostname
val = '-' + val
if '=' in val:
if v is not None:
ret.append(val + str(v))
else:
ret.append(val)
else:
ret.append(val)
if v is not None:
ret.append(str(v))
# Executable and argument list must come last in a job submission
if ((op == IFL_SUBMIT) and (executable is not None)):
ret.append('--')
ret.append(executable)
if arglist is not None:
ret.append(arglist)
return ret
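    # Illustrative sketch (not part of the original library): converting an
    # API-style attribute dictionary to qsub-like CLI options. The exact
    # output depends on the api_to_cli mapping; the example below assumes
    # that 'Resource_List' maps to 'l'.
    #
    #   bu = BatchUtils()
    #   bu.convert_to_cli({'Resource_List.ncpus': 2}, op=IFL_SUBMIT)
    #   # -> roughly ['-l', 'ncpus=2']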
def filter_batch_status(self, bs, attrib):
"""
Filter out elements that don't have the attributes requested
This is needed to adapt to the fact that requesting a
resource attribute returns all ``'<resource-name>.*'``
attributes so we need to ensure that the specific resource
requested is present in the stat'ed object.
This is needed especially when calling expect with an op=NE
because we need to filter on objects that have exactly
the attributes requested
:param bs: Batch status
:param attrib: Requested attributes
:type attrib: str or dictionary
:returns: Filtered batch status
"""
if isinstance(attrib, dict):
keys = attrib.keys()
elif isinstance(attrib, str):
keys = attrib.split(',')
else:
keys = attrib
if keys:
del_indices = []
for idx in range(len(bs)):
for k in bs[idx].keys():
if '.' not in k:
continue
if k != 'id' and k not in keys:
del bs[idx][k]
# if no matching resources, remove the object
if len(bs[idx]) == 1:
del_indices.append(idx)
for i in sorted(del_indices, reverse=True):
del bs[i]
return bs
def convert_attributes_by_op(self, attributes, setattrs=False):
"""
Convert attributes by operator, i.e. convert an attribute
of the form
``<attr_name><op><value>`` (e.g. resources_available.ncpus>4)
to
``<attr_name>: (<op>, <value>)``
(e.g. resources_available.ncpus: (GT, 4))
:param attributes: the attributes to convert
:type attributes: List
:param setattrs: if True, set the attributes with no operator
as (SET, '')
:type setattrs: bool
:returns: Converted attributes by operator
"""
# the order of operator matters because they are used to search by
# regex so the longer strings to search must come first
operators = ('<=', '>=', '!=', '=', '>', '<', '~')
d = {}
for attr in attributes:
found = False
for op in operators:
if op in attr:
a = attr.split(op)
d[a[0]] = (PTL_STR_TO_OP[op], a[1])
found = True
break
if not found and setattrs:
d[attr] = (SET, '')
return d
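    # Illustrative sketch (not part of the original library): converting
    # operator-style attribute strings into the (operator, value) form.
    # Assumes PTL_STR_TO_OP maps '>' to GT and '=' to EQ.
    #
    #   bu = BatchUtils()
    #   bu.convert_attributes_by_op(['resources_available.ncpus>4',
    #                                'state=free'])
    #   # -> {'resources_available.ncpus': (GT, '4'), 'state': (EQ, 'free')}
    #   bu.convert_attributes_by_op(['queue'], setattrs=True)
    #   # -> {'queue': (SET, '')}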
def operator_in_attribute(self, attrib):
"""
Returns True if an operator string is present in an
attribute name
:param attrib: Attribute name
:type attrib: str
:returns: True or False
"""
operators = PTL_STR_TO_OP.keys()
for a in attrib:
for op in operators:
if op in a:
return True
return False
def list_resources(self, objtype=None, objs=[]):
"""
Lists the resources
:param objtype: Type of the object
:type objtype: str
:param objs: Object list
:type objs: List
:returns: List of resources
"""
if objtype in (VNODE, NODE, SERVER, QUEUE, SCHED):
prefix = 'resources_available.'
elif objtype in (JOB, RESV):
prefix = 'Resource_List.'
else:
return
resources = []
for o in objs:
for a in o.keys():
if a.startswith(prefix):
res = a.replace(prefix, '')
if res not in resources:
resources.append(res)
return resources
def compare(self, obj1, obj2, showdiff=False):
"""
Compare two objects.
:param showdiff: whether to print the specific differences,
defaults to False
:type showdiff: bool
:returns: 0 if objects are identical and non zero otherwise
"""
if not showdiff:
ret = cmp(obj1, obj2)
if ret != 0:
self.logger.info('objects differ')
return ret
if not isinstance(obj1, type(obj2)):
self.logger.error('objects are of different type')
return 1
if isinstance(obj1, list):
if len(obj1) != len(obj2):
self.logger.info(
'comparing ' + str(
obj1) + ' and ' + str(
obj2))
self.logger.info('objects are of different lengths')
return
for i in range(len(obj1)):
self.compare(obj1[i], obj2[i], showdiff=showdiff)
return
if isinstance(obj1, dict):
self.logger.info('comparing ' + str(obj1) + ' and ' + str(obj2))
onlyobj1 = []
diffobjs = []
onlyobj2 = []
for k1, v1 in obj1.items():
if k1 not in obj2:
onlyobj1.append(k1 + '=' + str(v1))
if k1 in obj2 and obj2[k1] != v1:
diffobjs.append(
k1 + '=' + str(v1) + ' vs ' + k1 + '=' + str(obj2[k1]))
for k2, v2 in obj2.items():
if k2 not in obj1:
onlyobj2.append(k2 + '=' + str(v2))
if len(onlyobj1) > 0:
self.logger.info("only in first object: " + " ".join(onlyobj1))
if len(onlyobj2) > 0:
self.logger.info(
"only in second object: " + " ".join(onlyobj2))
if len(diffobjs) > 0:
self.logger.info("diff between objects: " + " ".join(diffobjs))
if len(onlyobj1) == len(onlyobj2) == len(diffobjs) == 0:
self.logger.info("objects are identical")
return 0
return 1
@classmethod
def random_str(cls, length=1, prefix=''):
"""
Generates the random string
:param length: Length of the string
:type length: int
:param prefix: Prefix of the string
:type prefix: str
:returns: Random string
"""
r = [random.choice(string.letters) for _ in range(length)]
r = ''.join([prefix] + r)
if hasattr(cls, '__uniq_rstr'):
while r in cls.__uniq_rstr:
r = [random.choice(string.letters) for _ in range(length)]
r = ''.join([prefix] + r)
cls.__uniq_rstr.append(r)
else:
cls.__uniq_rstr = [r]
return r
def _make_template_formula(self, formula):
"""
Create a template of the formula
:param formula: Formula for which template is to be created
:type formula: str
:returns: Template
"""
tformula = []
skip = False
for c in formula:
if not skip and c.isalpha():
tformula.append('$')
skip = True
if c in ('+', '-', '/', ' ', '*', '%'):
skip = False
tformula.append(c)
return "".join(tformula)
def update_attributes_list(self, obj):
"""
Updates the attribute list
:param obj: Objects
:returns: Updated attribute list
"""
if not hasattr(obj, 'attributes'):
return
if not hasattr(obj, 'Resource_List'):
setattr(obj, 'Resource_List', {})
for attr, val in obj.attributes.items():
if attr.startswith('Resource_List.'):
(_, resource) = attr.split('.')
obj.Resource_List[resource] = val
def parse_fgc_limit(self, limstr=None):
"""
Parse an ``FGC`` limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>
=<entity_value>\]``
:param limstr: FGC limit string
:type limstr: str or None
:returns: Parsed FGC string in given format
"""
m = self.lim_tag.match(limstr)
if m:
_v = str(self.decode_value(m.group('entity_value')))
return (m.group('limtype'), m.group('resource'),
m.group('entity_type'), m.group('entity_name'), _v)
return None
def is_job_array(self, jobid):
"""
If a job array return True, otherwise return False
:param jobid: PBS jobid
:returns: True or False
"""
if self.array_tag.match(jobid):
return True
return False
def is_subjob(self, jobid):
"""
If a subjob of a job array, return the subjob id
otherwise return False
:param jobid: PBS job id
:type jobid: str
:returns: True or False
"""
m = self.subjob_tag.match(jobid)
if m:
return m.group('subjobid')
return False
class PbsTypeFGCLimit(object):
"""
FGC limit entry, of the form:
``<limtype>[.<resource>]=\[<entity_type>:<entity_name>=
<entity_value>\]``
:param attr: FGC limit attribute
:type attr: str
:param value: Value of attribute
:type value: int
:returns: FGC limit entry of given format
"""
fgc_attr_pat = re.compile("(?P<ltype>[a-z_]+)[\.]*(?P<resource>[\w\d-]*)")
fgc_val_pat = re.compile("[\s]*\[(?P<etype>[ugpo]):(?P<ename>[\w\d-]+)"
"=(?P<eval>[\d]+)\][\s]*")
utils = BatchUtils()
def __init__(self, attr, val):
self.attr = attr
self.val = val
a = self.fgc_attr_pat.match(attr)
if a:
self.limit_type = a.group('ltype')
self.resource_name = a.group('resource')
else:
self.limit_type = None
self.resource_name = None
v = self.fgc_val_pat.match(val)
if v:
self.lim_value = self.utils.decode_value(v.group('eval'))
self.entity_type = v.group('etype')
self.entity_name = v.group('ename')
else:
self.lim_value = None
self.entity_type = None
self.entity_name = None
def __val__(self):
return ('[' + str(self.entity_type) + ':' +
str(self.entity_name) + '=' + str(self.lim_value) + ']')
def __str__(self):
return (self.attr + ' = ' + self.__val__())
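# Illustrative sketch (not part of the original library): parsing an FGC
# limit string with PbsTypeFGCLimit. The attribute and value are arbitrary
# examples.
#
#   lim = PbsTypeFGCLimit('max_run_res.ncpus', '[u:user1=4]')
#   lim.limit_type      # -> 'max_run_res'
#   lim.resource_name   # -> 'ncpus'
#   lim.entity_type     # -> 'u'
#   lim.entity_name     # -> 'user1'
#   lim.lim_value       # -> 4
#   str(lim)            # -> 'max_run_res.ncpus = [u:user1=4]'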
class PbsBatchStatus(list):
"""
Wrapper class for Batch Status object
Converts a batch status (as dictlist) into a list of
PbsBatchObjects
:param bs: Batch status
:type bs: List or dictionary
:returns: List of PBS batch objects
"""
    def __init__(self, bs):
        if not isinstance(bs, (list, dict)):
            raise TypeError("Expected a list or dictionary")
        self.__bu = BatchUtils()
        if isinstance(bs, dict):
            self.__bs = [bs]
            self.append(PbsBatchObject(bs))
        else:
            self.__bs = bs
            for b in bs:
                self.append(PbsBatchObject(b))
    def __str__(self):
        rv = []
        for l in self.__bs:
            rv += [self.__bu.batch_status_as_dict_to_str(l)]
        return "\n".join(rv)
class PbsBatchObject(list):
def __init__(self, bs):
self.set_batch_status(bs)
def set_batch_status(self, bs):
"""
Sets the batch status
:param bs: Batch status
"""
if 'id' in bs:
self.name = bs['id']
for k, v in bs.items():
self.append(PbsAttribute(k, v))
class PbsAttribute(object):
"""
Descriptor class for PBS attribute
:param name: PBS attribute name
:type name: str
:param value: Value for the attribute
:type value: str or int or float
"""
utils = BatchUtils()
def __init__(self, name=None, value=None):
self.set_name(name)
self.set_value(value)
def set_name(self, name):
"""
Set PBS attribute name
:param name: PBS attribute
:type name: str
"""
self.name = name
if name is not None and '.' in name:
self.is_resource = True
self.resource_type, self.resource_name = self.name.split('.')
else:
self.is_resource = False
self.resource_type = self.resource_name = None
def set_value(self, value):
"""
Set PBS attribute value
:param value: Value of PBS attribute
:type value: str or int or float
"""
self.value = value
if isinstance(value, (int, float)) or str(value).isdigit():
self.is_consumable = True
else:
self.is_consumable = False
def obfuscate_name(self, a=None):
"""
Obfuscate PBS attribute name
"""
if a is not None:
on = a
else:
on = self.utils.random_str(len(self.name))
self.decoded_name = self.name
if self.is_resource:
self.set_name(self.resource_name + '.' + on)
def obfuscate_value(self, v=None):
"""
Obfuscate PBS attribute value
"""
        if not self.is_consumable:
self.decoded_value = self.value
return
if v is not None:
ov = v
else:
ov = self.utils.random_str(len(self.value))
self.decoded_value = self.value
self.set_value(ov)
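# Illustrative sketch (not part of the original library): how PbsAttribute
# splits a resource-style attribute name. Names and values are arbitrary
# examples.
#
#   a = PbsAttribute('resources_available.ncpus', '8')
#   a.is_resource       # -> True
#   a.resource_type     # -> 'resources_available'
#   a.resource_name     # -> 'ncpus'
#   a.is_consumable     # -> True ('8' is a digit string)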
class PbsAnonymizer(object):
"""
Holds and controls anonymizing operations of PBS data
    When a dictionary is passed in, the values associated to each key
    are substituted during obfuscation.
    The anonymizer operates on attributes or resources. Resource
    entries operate on the resource name itself rather than the full
    attribute name; for example, to obfuscate the values associated to
    a custom resource "foo" that could be set as
    resources_available.foo, resources_default.foo or Resource_List.foo,
    all that needs to be passed in is "foo" in the resc_vals list.
:param attr_key: Attribute key
:type attr_key: str or None
:param attr_val: Attribute value
:type attr_val: str or None
:param resc_key: Resource key
:type resc_key: str or None
:param resc_val: Resource value
:type resc_val: str or None
"""
logger = logging.getLogger(__name__)
utils = BatchUtils()
du = DshUtils()
def __init__(self, attr_delete=None, resc_delete=None,
attr_key=None, attr_val=None,
resc_key=None, resc_val=None):
# special cases
self._entity = False
self.job_sort_formula = None
self.schedselect = None
self.select = None
self.set_attr_delete(attr_delete)
self.set_resc_delete(resc_delete)
self.set_attr_key(attr_key)
self.set_attr_val(attr_val)
self.set_resc_key(resc_key)
self.set_resc_val(resc_val)
self.anonymize = self.anonymize_batch_status
# global anonymized mapping data
self.gmap_attr_val = {}
self.gmap_resc_val = {}
self.gmap_attr_key = {}
self.gmap_resc_key = {}
def _initialize_key_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = None
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = None
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def _initialize_value_map(self, keys):
k = {}
if keys is not None:
if isinstance(keys, dict):
return keys
elif isinstance(keys, list):
for i in keys:
k[i] = {}
elif isinstance(keys, str):
for i in keys.split(','):
k[i] = {}
else:
self.logger.error('unhandled map type')
k = {None: None}
return k
def set_attr_delete(self, ad):
"""
Name of attributes to delete
:param ad: Attributes to delete
:type ad: str or list or dictionary
"""
self.attr_delete = self._initialize_value_map(ad)
def set_resc_delete(self, rd):
"""
Name of resources to delete
:param rd: Resources to delete
:type rd: str or list or dictionary
"""
self.resc_delete = self._initialize_value_map(rd)
def set_attr_key(self, ak):
"""
Name of attributes to obfuscate.
:param ak: Attribute keys
:type ak: str or list or dictionary
"""
self.attr_key = self._initialize_key_map(ak)
def set_attr_val(self, av):
"""
Name of attributes for which to obfuscate the value
:param av: Attributes value to obfuscate
:type av: str or list or dictionary
"""
self.attr_val = self._initialize_value_map(av)
if 'euser' in self.attr_val:
self._entity = True
elif 'egroup' in self.attr_val:
self._entity = True
elif 'project' in self.attr_val:
self._entity = True
def set_resc_key(self, rk):
"""
Name of resources to obfuscate
:param rk: Resource key
:type rk: str or list or dictionary
"""
self.resc_key = self._initialize_key_map(rk)
def set_resc_val(self, rv):
"""
Name of resources for which to obfuscate the value
:param rv: Resource value to obfuscate
:type rv: str or list or dictionary
"""
self.resc_val = self._initialize_value_map(rv)
def set_anon_map_file(self, name):
"""
Name of file in which to store anonymized map data.
This file is meant to remain private to a site as it
contains the sensitive anonymized data.
:param name: Name of file to which anonymized data to store.
:type name: str
"""
self.anon_map_file = name
def anonymize_resource_group(self, file):
"""
Anonymize the user and group fields of a resource
group file
:param file: Resource group file
:type file: str
"""
anon_rg = []
try:
f = open(file)
lines = f.readlines()
f.close()
except:
self.logger.error("Error processing " + file)
return None
for data in lines:
data = data.strip()
if data:
if data[0] == '#':
continue
_d = data.split()
ug = _d[0]
if ':' in ug:
(euser, egroup) = ug.split(':')
else:
euser = ug
egroup = None
if 'euser' not in self.attr_val:
anon_euser = euser
else:
anon_euser = None
if 'euser' in self.gmap_attr_val:
if euser in self.gmap_attr_val['euser']:
anon_euser = self.gmap_attr_val['euser'][euser]
else:
self.gmap_attr_val['euser'] = {}
if euser is not None and anon_euser is None:
anon_euser = self.utils.random_str(len(euser))
self.gmap_attr_val['euser'][euser] = anon_euser
if 'egroup' not in self.attr_val:
anon_egroup = egroup
else:
anon_egroup = None
if egroup is not None:
if 'egroup' in self.gmap_attr_val:
if egroup in self.gmap_attr_val['egroup']:
anon_egroup = (self.gmap_attr_val['egroup']
[egroup])
else:
self.gmap_attr_val['egroup'] = {}
if egroup is not None and anon_egroup is None:
anon_egroup = self.utils.random_str(len(egroup))
self.gmap_attr_val['egroup'][egroup] = anon_egroup
# reconstruct the fairshare info by combining euser and egroup
out = [anon_euser]
if anon_egroup is not None:
out[0] += ':' + anon_egroup
# and appending the rest of the original line
out.append(_d[1])
                if len(_d) > 2:
p = _d[2].strip()
if ('euser' in self.gmap_attr_val and
p in self.gmap_attr_val['euser']):
out.append(self.gmap_attr_val['euser'][p])
else:
out.append(_d[2])
                if len(_d) > 3:
out += _d[3:]
anon_rg.append(" ".join(out))
return anon_rg
def anonymize_resource_def(self, resources):
"""
Anonymize the resource definition
"""
if not self.resc_key:
return resources
for curr_anon_resc, val in self.resc_key.items():
if curr_anon_resc in resources:
tmp_resc = copy.copy(resources[curr_anon_resc])
del resources[curr_anon_resc]
if val is None:
if curr_anon_resc in self.gmap_resc_key:
val = self.gmap_resc_key[curr_anon_resc]
else:
val = self.utils.random_str(len(curr_anon_resc))
elif curr_anon_resc not in self.gmap_resc_key:
self.gmap_resc_key[curr_anon_resc] = val
tmp_resc.set_name(val)
resources[val] = tmp_resc
return resources
def __anonymize_fgc(self, d, attr, ar, name, val):
"""
Anonymize an FGC limit value
"""
m = {'u': 'euser', 'g': 'egroup', 'p': 'project'}
if ',' in val:
fgc_lim = val.split(',')
else:
fgc_lim = [val]
nfgc = []
for lim in fgc_lim:
_fgc = PbsTypeFGCLimit(attr, lim)
ename = _fgc.entity_name
if ename in ('PBS_GENERIC', 'PBS_ALL'):
nfgc.append(lim)
continue
obf_ename = ename
for etype, nm in m.items():
if _fgc.entity_type == etype:
if nm not in self.gmap_attr_val:
if nm in ar and ename in ar[nm]:
obf_ename = ar[nm][ename]
else:
obf_ename = self.utils.random_str(len(ename))
self.gmap_attr_val[nm] = {ename: obf_ename}
                    elif ename in self.gmap_attr_val[nm]:
                        obf_ename = self.gmap_attr_val[nm][ename]
break
_fgc.entity_name = obf_ename
nfgc.append(_fgc.__val__())
d[attr] = ",".join(nfgc)
def __anonymize_attr_val(self, d, attr, ar, name, val):
"""
Obfuscate an attribute/resource values
"""
# don't obfuscate default project
if attr == 'project' and val == '_pbs_project_default':
return
nstr = []
if '.' in attr:
m = self.gmap_resc_val
else:
m = self.gmap_attr_val
if val in ar[name]:
nstr.append(ar[name][val])
if name in self.lmap:
self.lmap[name][val] = ar[name][val]
else:
self.lmap[name] = {val: ar[name][val]}
if name not in m:
m[name] = {val: ar[name][val]}
elif val not in m[name]:
m[name][val] = ar[name][val]
else:
# Obfuscate by randomizing with a value of the same length
tmp_v = val.split(',')
for v in tmp_v:
if v in ar[name]:
r = ar[name][v]
elif name in m and v in m[name]:
r = m[name][v]
else:
r = self.utils.random_str(len(v))
if not isinstance(ar[name], dict):
ar[name] = {}
ar[name][v] = r
self.lmap[name] = {v: r}
if name not in m:
m[name] = {v: r}
elif v not in m[name]:
m[name][v] = r
nstr.append(r)
if d is not None:
d[attr] = ",".join(nstr)
def __anonymize_attr_key(self, d, attr, ar, name, res):
"""
Obfuscate an attribute/resource key
"""
if res is not None:
m = self.gmap_resc_key
else:
m = self.gmap_attr_key
if not ar[name]:
if name in m:
ar[name] = m[name]
else:
randstr = self.utils.random_str(len(name))
ar[name] = randstr
m[name] = randstr
if d is not None:
tmp_val = d[attr]
del d[attr]
if res is not None:
d[res + '.' + ar[name]] = tmp_val
else:
d[ar[name]] = tmp_val
if name not in self.lmap:
self.lmap[name] = ar[name]
if name not in m:
m[name] = ar[name]
def anonymize_batch_status(self, data=None):
"""
Anonymize arbitrary batch_status data
:param data: Batch status data
:type data: List or dictionary
"""
if not isinstance(data, (list, dict)):
self.logger.error('data expected to be dict or list')
return None
if isinstance(data, dict):
dat = [data]
else:
dat = data
# Local mapping data used to store obfuscation mapping data for this
# specific item, d
self.lmap = {}
# loop over each "batch_status" entry to obfuscate
for d in dat:
if self.attr_delete is not None:
for todel in self.attr_delete:
if todel in d:
del d[todel]
if self.resc_delete is not None:
for todel in self.resc_delete:
for tmpk, _ in d.items():
if '.' in tmpk and todel == tmpk.split('.')[1]:
del d[tmpk]
# Loop over each object's attributes, this is where the special
# cases are handled (e.g., FGC limits, formula, select spec...)
for attr in d:
val = d[attr]
if '.' in attr:
(res_type, res_name) = attr.split('.')
else:
res_type = None
res_name = attr
if res_type is not None:
if self._entity and attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
if res_name in self.resc_val:
if attr.startswith('max_run'):
self.__anonymize_fgc(d, attr, self.attr_val,
attr, val)
self.__anonymize_attr_val(d, attr, self.resc_val,
res_name, val)
if res_name in self.resc_key:
self.__anonymize_attr_key(d, attr, self.resc_key,
res_name, res_type)
else:
if attr in self.attr_val:
self.__anonymize_attr_val(d, attr, self.attr_val,
attr, val)
if attr in self.attr_key:
self.__anonymize_attr_key(d, attr, self.attr_key,
attr, None)
if ((attr in ('job_sort_formula', 'schedselect',
'select')) and self.resc_key):
for r in self.resc_key:
if r in val:
if r not in self.gmap_resc_key:
self.gmap_resc_key[
r] = self.utils.random_str(len(r))
val = val.replace(r, self.gmap_resc_key[r])
setattr(self, attr, val)
d[attr] = val
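    # Illustrative sketch (not part of the original library): anonymizing
    # the value of an attribute in a stat-like dictionary. Keys and values
    # are arbitrary examples.
    #
    #   anon = PbsAnonymizer(attr_val=['euser', 'egroup'])
    #   d = {'id': '1.server', 'euser': 'bob', 'Resource_List.ncpus': '2'}
    #   anon.anonymize_batch_status(d)
    #   # d['euser'] is now a random string of the same length as 'bob';
    #   # the mapping is recorded in anon.gmap_attr_val['euser']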
def anonymize_file(self, filename, extension='.anon', inplace=False):
"""
Replace every occurrence of any entry in the global
map for the given file by its anonymized values.
Returns a file named after the original file with the
        extension suffix. If inplace is True, returns the original
file name for which contents have been replaced
:param filename: Name of the file to anonymize
:type filename: str
:param extension: Extension of the anonymized file
:type extension: str
:param inplace: If true returns the original file name for
which contents have been replaced
:type inplace: bool
"""
if not inplace:
fn = (filename + extension)
nf = open(fn, 'w')
else:
(_, fn) = self.du.mkstemp()
nf = open(fn, "w")
f = open(filename)
for data in f:
            for k in self.attr_key:
                if k in data:
                    if k not in self.gmap_attr_key:
                        ak = self.utils.random_str(len(k))
                        self.gmap_attr_key[k] = ak
                    data = data.replace(k, self.gmap_attr_key[k])
for k in self.resc_key:
if k not in self.gmap_resc_key:
rk = self.utils.random_str(len(k))
self.gmap_resc_key[k] = rk
else:
rk = self.gmap_resc_key[k]
data = data.replace(k, self.gmap_resc_key[k])
for ak, av in self.gmap_attr_val.items():
for k, v in av.items():
data = data.replace(k, v)
for ak, av in self.gmap_resc_val.items():
for k, v in av.items():
data = data.replace(k, v)
nf.write(data)
nf.close()
f.close()
if inplace:
self.du.run_cmd(cmd=['mv', fn, filename])
return filename
return fn
def anonymize_accounting_log(self, logfile):
"""
Anonymize the accounting log
        :param logfile: Accounting log file
:type logfile: str
"""
try:
f = open(logfile)
except:
self.logger.error("Error processing " + logfile)
return None
if 'euser' in self.attr_val:
self.attr_val['user'] = self.attr_val['euser']
self.attr_val['requestor'] = self.attr_val['euser']
if 'egroup' in self.attr_val:
self.attr_val['group'] = self.attr_val['egroup']
if 'euser' in self.gmap_attr_val:
self.gmap_attr_val['user'] = self.gmap_attr_val['euser']
if 'egroup' in self.gmap_attr_val:
self.gmap_attr_val['group'] = self.gmap_attr_val['egroup']
anon_data = []
for data in f:
# accounting log format is
# %Y/%m/%d %H:%M:%S;<Key>;<Id>;<key1=val1> <key2=val2> ...
curr = data.split(';', 3)
if curr[1] in ('A', 'L'):
anon_data.append(data.strip())
continue
buf = curr[3].strip().split(' ')
# Split the attribute list into key value pairs
kvl = map(lambda n: n.split('=', 1), buf)
for i in range(len(kvl)):
k, v = kvl[i]
if k == 'requestor':
if '@' in v:
(v, host) = v.split('@', 1)
if k in self.attr_val:
if k == 'project' and v == '_pbs_project_default':
continue
anon_kv = None
if k in self.gmap_attr_val:
if v in self.gmap_attr_val[k]:
anon_kv = self.gmap_attr_val[k][v]
else:
self.gmap_attr_val[k] = {}
if anon_kv is None:
anon_kv = self.utils.random_str(len(v))
self.gmap_attr_val[k][v] = anon_kv
kvl[i][1] = anon_kv
# append server from where request was made
if k == 'requestor':
kvl[i][1] += '@' + host
if k in self.attr_key:
if k in self.gmap_attr_key:
                    anon_ak = self.gmap_attr_key[k]
else:
anon_ak = self.utils.random_str(len(k))
self.gmap_attr_key[k] = anon_ak
kvl[i][0] = anon_ak
if '.' in k:
restype, resname = k.split('.')
for rv in self.resc_val:
if resname == rv:
anon_rv = None
if resname in self.gmap_resc_val:
if v in self.gmap_resc_val[resname]:
anon_rv = self.gmap_resc_val[resname][v]
else:
self.gmap_resc_val[resname] = {}
if anon_rv is None:
anon_rv = self.utils.random_str(len(v))
self.gmap_resc_val[resname][v] = anon_rv
kvl[i][1] = anon_rv
if resname in self.resc_key:
if resname in self.gmap_resc_key:
anon_rk = self.gmap_resc_key[resname]
else:
anon_rk = self.utils.random_str(len(resname))
self.gmap_resc_key[resname] = anon_rk
kvl[i][0] = restype + '.' + anon_rk
anon_data.append(";".join(curr[:3]) + ";" +
" ".join(map(lambda n: "=".join(n), kvl)))
f.close()
return anon_data
def anonymize_sched_config(self, scheduler):
"""
Anonymize the scheduler config
:param scheduler: PBS scheduler object
"""
if len(self.resc_key) == 0:
return
# when anonymizing we get rid of the comments as they may contain
# sensitive information
scheduler._sched_config_comments = {}
# If resources need to be anonymized then update the resources line
# job_sort_key and node_sort_key
sr = scheduler.get_resources()
if sr:
for i in range(0, len(sr)):
if sr[i] in self.resc_key:
if sr[i] in self.gmap_resc_key:
sr[i] = self.gmap_resc_key[sr[i]]
else:
anon_res = self.utils.random_str(len(sr[i]))
self.gmap_resc_key[sr[i]] = anon_res
sr[i] = anon_res
scheduler.sched_config['resources'] = ",".join(sr)
for k in ['job_sort_key', 'node_sort_key']:
if k in scheduler.sched_config:
sc_jsk = scheduler.sched_config[k]
if not isinstance(sc_jsk, list):
sc_jsk = list(sc_jsk)
for r in self.resc_key:
for i in range(len(sc_jsk)):
                        if r in sc_jsk[i] and r in self.gmap_resc_key:
                            sc_jsk[i] = sc_jsk[i].replace(
                                r, self.gmap_resc_key[r])
def __str__(self):
return ("Attributes Values: " + str(self.gmap_attr_val) + "\n" +
"Resources Values: " + str(self.gmap_resc_val) + "\n" +
"Attributes Keys: " + str(self.gmap_attr_key) + "\n" +
"Resources Keys: " + str(self.gmap_resc_key))
class Entity(object):
"""
Abstract representation of a PBS consumer that has an
external relationship to the PBS system. For example, a
user associated to an OS identifier (uid) maps to a PBS
user entity.
Entities may be subject to policies, such as limits, consume
a certain amount of resource and/or fairshare usage.
:param etype: Entity type
:type etype: str or None
:param name: Entity name
:type name: str or None
"""
def __init__(self, etype=None, name=None):
self.type = etype
self.name = name
self.limits = []
self.resource_usage = {}
self.fairshare_usage = 0
def set_limit(self, limit=None):
"""
:param limit: Limit to be set
:type limit: str or None
"""
for l in self.limits:
if str(limit) == str(l):
return
self.limits.append(limit)
def set_resource_usage(self, container=None, resource=None, usage=None):
"""
Set the resource type
:param resource: PBS resource
:type resource: str or None
:param usage: Resource usage value
:type usage: str or None
"""
if self.type:
if container in self.resource_usage:
                if self.resource_usage[container]:
if resource in self.resource_usage[container]:
self.resource_usage[container][resource] += usage
else:
self.resource_usage[container][resource] = usage
else:
self.resource_usage[container] = {resource: usage}
def set_fairshare_usage(self, usage=0):
"""
Set fairshare usage
:param usage: Fairshare usage value
:type usage: int
"""
self.fairshare_usage += usage
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.limits) + ' ' + str(self.resource_usage) + ' ' + \
str(self.fairshare_usage)
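# Illustrative sketch (not part of the original library): accumulating usage
# on an Entity. The entity, container and resource names are arbitrary
# examples.
#
#   e = Entity(etype='user', name='bob')
#   e.set_resource_usage(container='workq', resource='ncpus', usage=2)
#   e.set_resource_usage(container='workq', resource='ncpus', usage=3)
#   e.resource_usage    # -> {'workq': {'ncpus': 5}}
#   e.set_fairshare_usage(10)
#   e.fairshare_usage   # -> 10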
class Policy(object):
"""
Abstract PBS policy. Can be one of ``limits``,
``access control``, ``scheduling policy``, etc...this
class does not currently support any operations
"""
def __init__(self):
pass
class Limit(Policy):
"""
Representation of a PBS limit
Limits apply to containers, are of a certain type
(e.g., max_run_res.ncpus) associated to a given resource
(e.g., resource), on a given entity (e.g.,user Bob) and
have a certain value.
:param limit_type: Type of the limit
:type limit_type: str or None
:param resource: PBS resource
:type resource: str or None
:param entity_obj: Entity object
:param value: Limit value
:type value: int
"""
def __init__(self, limit_type=None, resource=None,
entity_obj=None, value=None, container=None,
container_id=None):
self.set_container(container, container_id)
self.soft_limit = False
self.hard_limit = False
self.set_limit_type(limit_type)
self.set_resource(resource)
self.set_value(value)
self.entity = entity_obj
def set_container(self, container, container_id):
"""
Set the container
:param container: Container which is to be set
:type container: str
:param container_id: Container id
"""
self.container = container
self.container_id = container_id
def set_limit_type(self, t):
"""
Set the limit type
:param t: Limit type
:type t: str
"""
self.limit_type = t
if '_soft' in t:
self.soft_limit = True
else:
self.hard_limit = True
def set_resource(self, resource):
"""
Set the resource
:param resource: resource value to set
:type resource: str
"""
self.resource = resource
def set_value(self, value):
"""
Set the resource value
:param value: Resource value
:type value: str
"""
self.value = value
def __eq__(self, value):
if str(self) == str(value):
return True
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
        l = [str(self.container_id), str(self.limit_type),
             str(self.resource), '[', str(self.entity.type), ':',
             str(self.entity.name), '=', str(self.value), ']']
        return " ".join(l)
class ExpectActions(object):
"""
List of action handlers to run when Server's expect
function does not get the expected result
:param action: Action to run
:type action: str
:param level: Logging level
"""
actions = {}
def __init__(self, action=None, level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.add_action(action, level=level)
def add_action(self, action=None, hostname=None, level=logging.INFO):
"""
Add an action
:param action: Action to add
:param hostname: Machine hostname
:type hostname: str
:param level: Logging level
"""
if action is not None and action.name is not None and\
action.name not in self.actions:
self.actions[action.name] = action
msg = ['expect action: added action ' + action.name]
if hostname:
msg += [' to server ' + hostname]
if level >= logging.INFO:
self.logger.info("".join(msg))
else:
self.logger.debug("".join(msg))
def has_action(self, name):
"""
check whether action exists or not
:param name: Name of action
:type name: str
"""
if name in self.actions:
return True
return False
def get_action(self, name):
"""
Get an action if exists
:param name: Name of action
:type name: str
"""
if name in self.actions:
return self.actions[name]
return None
def list_actions(self, level=logging.INFO):
"""
List an actions
:param level: Logging level
"""
        if level >= logging.INFO:
            self.logger.info(self.get_all_actions())
        else:
            self.logger.debug(self.get_all_actions())
def get_all_actions(self):
"""
Get all the action
"""
return self.actions.values()
def get_actions_by_type(self, atype=None):
"""
Get an action by type
:param atype: Action type
:type atype: str
"""
if atype is None:
return None
ret_actions = []
for action in self.actions.values():
if action.type is not None and action.type == atype:
ret_actions.append(action)
return ret_actions
def _control_action(self, action=None, name=None, enable=None):
if action:
            action.enabled = enable
            name = action.name
        elif name is not None:
            if name == 'ALL':
                for a in self.actions.values():
                    a.enabled = enable
            else:
                a = self.get_action(name)
                a.enabled = enable
else:
return
if enable:
msg = 'enabled'
else:
msg = 'disabled'
self.logger.info('expect action: ' + name + ' ' + msg)
def disable_action(self, action=None, name=None):
"""
Disable an action
"""
self._control_action(action, name, enable=False)
def enable_action(self, action=None, name=None):
"""
Enable an action
"""
self._control_action(action, name, enable=True)
def disable_all_actions(self):
"""
Disable all actions
"""
for a in self.actions.values():
a.enabled = False
def enable_all_actions(self):
"""
Enable all actions
"""
for a in self.actions.values():
a.enabled = True
class ExpectAction(object):
"""
Action function to run when Server's expect function does
not get the expected result
:param atype: Action type
:type atype: str
"""
def __init__(self, name=None, enabled=True, atype=None, action=None,
level=logging.INFO):
self.logger = logging.getLogger(__name__)
self.set_name(name, level=level)
self.set_enabled(enabled)
self.set_type(atype)
self.set_action(action)
def set_name(self, name, level=logging.INFO):
"""
        Set the action name
:param name: Action name
:type name: str
"""
if level >= logging.INFO:
self.logger.info('expect action: created new action ' + name)
else:
self.logger.debug('expect action: created new action ' + name)
self.name = name
def set_enabled(self, enabled):
self.enabled = enabled
def set_type(self, atype):
self.type = atype
def set_action(self, action):
self.action = action
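# Illustrative sketch (not part of the original library): registering an
# expect action. The action name is an arbitrary example and the callback
# signature is an assumption; the caller (e.g. Server.expect) decides how
# the action is invoked.
#
#   def on_failure(*args, **kwargs):
#       pass  # e.g. collect diagnostics when an expect() does not match
#
#   ea = ExpectActions()
#   ea.add_action(ExpectAction(name='dump_diag', atype=JOB,
#                              action=on_failure))
#   ea.has_action('dump_diag')        # -> True
#   ea.disable_action(name='dump_diag')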
class PbsTypeAttribute(dict):
"""
Experimental. This is a placeholder object that will be used
in the future to map attribute information and circumvent
    the error-prone dynamic type detection that is currently done
using ``decode_value()``
"""
def __getitem__(self, name):
return BatchUtils.decode_value(super(PbsTypeAttribute,
self).__getitem__(name))
class PBSObject(object):
"""
Generic PBS Object encapsulating attributes and defaults
:param name: The name associated to the object
:type name: str
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
"""
utils = BatchUtils()
platform = sys.platform
def __init__(self, name, attrs={}, defaults={}):
self.attributes = OrderedDict()
self.name = name
self.dflt_attributes = defaults
self.attropl = None
self.custom_attrs = OrderedDict()
self.ctime = int(time.time())
self.set_attributes(attrs)
def set_attributes(self, a={}):
"""
set attributes and custom attributes on this object.
custom attributes are used when converting attributes
to CLI
:param a: Attribute dictionary
:type a: Dictionary
"""
if isinstance(a, list):
a = OrderedDict(a)
self.attributes = OrderedDict(self.dflt_attributes.items() +
self.attributes.items() + a.items())
self.custom_attrs = OrderedDict(self.custom_attrs.items() +
a.items())
def unset_attributes(self, attrl=[]):
"""
Unset attributes from object's attributes and custom
attributes
:param attrl: Attribute list
:type attrl: List
"""
for attr in attrl:
if attr in self.attributes:
del self.attributes[attr]
if attr in self.custom_attrs:
del self.custom_attrs[attr]
def __str__(self):
"""
Return a string representation of this PBSObject
"""
if self.name is None:
return ""
s = []
if isinstance(self, Job):
s += ["Job Id: " + self.name + "\n"]
elif isinstance(self, Queue):
s += ["Queue: " + self.name + "\n"]
elif isinstance(self, Server):
s += ["Server: " + self.hostname + "\n"]
elif isinstance(self, Reservation):
s += ["Name: " + "\n"]
else:
s += [self.name + "\n"]
for k, v in self.attributes.items():
s += [" " + k + " = " + str(v) + "\n"]
return "".join(s)
def __repr__(self):
return str(self.attributes)
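# Illustrative sketch (not part of the original library): basic attribute
# handling on a PBSObject. The object and attribute names are arbitrary
# examples.
#
#   o = PBSObject('myobj', attrs={'comment': 'test', 'Job_Name': 'job1'})
#   o.unset_attributes(['comment'])
#   print o    # prints the object name and its remaining attributes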
class PBSService(PBSObject):
"""
Generic PBS service object to hold properties of PBS daemons
:param name: The name associated to the object
:type name: str or None
:param attrs: Dictionary of attributes to set on object
:type attrs: Dictionary
:param defaults: Dictionary of default attributes. Setting
this will override any other object's default
:type defaults: Dictionary
:param pbsconf_file: Optional path to the pbs configuration
file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory
(This will overrides diagmap)
:type diag: str or None
"""
du = DshUtils()
pu = ProcUtils()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None):
if name is None:
self.hostname = socket.gethostname()
else:
self.hostname = name
if diag:
self.diagmap = self._load_from_diag(diag)
self.has_diag = True
self.diag = diag
elif len(diagmap) > 0:
self.diagmap = diagmap
self.diag = None
self.has_diag = True
else:
self.diagmap = {}
self.diag = None
self.has_diag = False
if not self.has_diag:
try:
self.fqdn = socket.gethostbyaddr(self.hostname)[0]
if self.hostname != self.fqdn:
self.logger.info('FQDN name ' + self.fqdn + ' differs '
'from name provided ' + self.hostname)
self.hostname = self.fqdn
except:
pass
else:
self.fqdn = self.hostname
self.shortname = self.hostname.split('.')[0]
self.logutils = None
self.logfile = None
self.acctlogfile = None
self.pid = None
self.pbs_conf = {}
self.pbs_env = {}
self._is_local = True
self.launcher = None
PBSObject.__init__(self, name, attrs, defaults)
if not self.has_diag:
if not self.du.is_localhost(self.hostname):
self._is_local = False
if pbsconf_file is None and not self.has_diag:
self.pbs_conf_file = self.du.get_pbs_conf_file(name)
else:
self.pbs_conf_file = pbsconf_file
if self.pbs_conf_file == '/etc/pbs.conf':
self.default_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.pbs_conf_file)):
self.default_pbs_conf = False
else:
self.default_pbs_conf = True
# default pbs_server_name to hostname, it will get set again once the
# config file is processed
self.pbs_server_name = self.hostname
# If diag is given then bypass parsing pbs.conf
if self.has_diag:
if diag is None:
t = 'pbs_diag_%s' % (time.strftime("%y%m%d_%H%M%S"))
self.diag = os.path.join(self.du.get_tempdir(), t)
self.pbs_conf['PBS_HOME'] = self.diag
self.pbs_conf['PBS_EXEC'] = self.diag
self.pbs_conf['PBS_SERVER'] = self.hostname
m = re.match('.*pbs_diag_(?P<datetime>\d{6,6}_\d{6,6}).*',
self.diag)
if m:
tm = time.strptime(m.group('datetime'), "%y%m%d_%H%M%S")
self.ctime = int(time.mktime(tm))
else:
self.pbs_conf = self.du.parse_pbs_config(self.hostname,
self.pbs_conf_file)
if self.pbs_conf is None or len(self.pbs_conf) == 0:
self.pbs_conf = {'PBS_HOME': "", 'PBS_EXEC': ""}
else:
ef = os.path.join(self.pbs_conf['PBS_HOME'], 'pbs_environment')
self.pbs_env = self.du.parse_pbs_environment(self.hostname, ef)
self.pbs_server_name = self.du.get_pbs_server_name(
self.pbs_conf)
self.init_logfile_path(self.pbs_conf)
def _load_from_diag(self, diag):
diagmap = {}
diagmap[SERVER] = os.path.join(diag, 'qstat_Bf.out')
diagmap[VNODE] = os.path.join(diag, 'pbsnodes_va.out')
diagmap[QUEUE] = os.path.join(diag, 'qstat_Qf.out')
diagmap[JOB] = os.path.join(diag, 'qstat_tf.out')
if not os.path.isfile(diagmap[JOB]):
diagmap[JOB] = os.path.join(diag, 'qstat_f.out')
diagmap[RESV] = os.path.join(diag, 'pbs_rstat_f.out')
diagmap[SCHED] = os.path.join(diag, 'qmgr_psched.out')
diagmap[HOOK] = []
if (os.path.isdir(os.path.join(diag, 'server_priv')) and
os.path.isdir(os.path.join(diag, 'server_priv', 'hooks'))):
_ld = os.listdir(os.path.join(diag, 'server_priv', 'hooks'))
for f in _ld:
if f.endswith('.HK'):
diagmap[HOOK].append(
os.path.join(diag, 'server_priv', 'hooks', f))
# Format of qmgr_psched.out differs from Batch Status, we transform
# it to go through the common batch status parsing routines
if os.path.isfile(diagmap[SCHED]):
f = open(os.path.join(diag, 'ptl_qstat_Sched.out'), 'w')
lines = open(diagmap[SCHED])
f.write("Sched \n")
for l in lines:
recs = l.split()
f.write("".join(recs[2:5]) + "\n")
f.close()
diagmap[SCHED] = os.path.join(diag, 'ptl_qstat_Sched.out')
else:
diagmap[SCHED] = None
return diagmap
def init_logfile_path(self, conf=None):
"""
Initialize path to log files for this service
:param conf: PBS conf file parameters
:type conf: Dictionary
"""
elmt = self._instance_to_logpath(self)
if elmt is None:
return
if conf is not None and 'PBS_HOME' in conf:
tm = time.strftime("%Y%m%d", time.localtime())
self.logfile = os.path.join(conf['PBS_HOME'], elmt, tm)
self.acctlogfile = os.path.join(conf['PBS_HOME'], 'server_priv',
'accounting', tm)
def _instance_to_logpath(self, inst):
"""
returns the log path associated to this service
"""
if isinstance(inst, Scheduler):
logval = 'sched_logs'
elif isinstance(inst, Server):
logval = 'server_logs'
elif isinstance(inst, MoM):
logval = 'mom_logs'
elif isinstance(inst, Comm):
logval = 'comm_logs'
else:
logval = None
return logval
def _instance_to_cmd(self, inst):
"""
returns the command associated to this service
"""
if isinstance(inst, Scheduler):
cmd = 'pbs_sched'
elif isinstance(inst, Server):
cmd = 'pbs_server'
elif isinstance(inst, MoM):
cmd = 'pbs_mom'
elif isinstance(inst, Comm):
cmd = 'pbs_comm'
else:
cmd = None
return cmd
def _instance_to_servicename(self, inst):
"""
return the service name associated to the instance. One of
``server, scheduler, or mom.``
"""
if isinstance(inst, Scheduler):
nm = 'scheduler'
elif isinstance(inst, Server):
nm = 'server'
elif isinstance(inst, MoM):
nm = 'mom'
elif isinstance(inst, Comm):
nm = 'comm'
else:
nm = ''
return nm
def _instance_to_privpath(self, inst):
"""
returns the path to priv associated to this service
"""
if isinstance(inst, Scheduler):
priv = 'sched_priv'
elif isinstance(inst, Server):
priv = 'server_priv'
elif isinstance(inst, MoM):
priv = 'mom_priv'
elif isinstance(inst, Comm):
priv = 'server_priv'
else:
priv = None
return priv
def _instance_to_lock(self, inst):
"""
returns the path to lock file associated to this service
"""
if isinstance(inst, Scheduler):
lock = 'sched.lock'
elif isinstance(inst, Server):
lock = 'server.lock'
elif isinstance(inst, MoM):
lock = 'mom.lock'
elif isinstance(inst, Comm):
lock = 'comm.lock'
else:
lock = None
return lock
def set_launcher(self, execargs=None):
self.launcher = execargs
def _isUp(self, inst):
"""
returns True if service is up and False otherwise
"""
live_pids = self._all_instance_pids(inst)
pid = self._get_pid(inst)
if live_pids is not None and pid in live_pids:
return True
return False
def _signal(self, sig, inst=None, procname=None):
"""
Send signal ``sig`` to service. sig is the signal name
as it would be sent to the program kill, e.g. -HUP.
Return the ``out/err/rc`` from the command run to send
the signal. See DshUtils.run_cmd
:param inst: Instance
:type inst: str
:param procname: Process name
:type procname: str or None
"""
pid = None
if inst is not None:
if inst.pid is not None:
pid = inst.pid
else:
pid = self._get_pid(inst)
if procname is not None:
pi = self.pu.get_proc_info(self.hostname, procname)
if pi is not None and pi.values() and pi.values()[0]:
for _p in pi.values()[0]:
ret = self.du.run_cmd(self.hostname, ['kill', sig, _p.pid],
sudo=True)
return ret
if pid is None:
return {'rc': 0, 'err': '', 'out': 'no pid to signal'}
return self.du.run_cmd(self.hostname, ['kill', sig, pid], sudo=True)
def _all_instance_pids(self, inst):
"""
Return a list of all ``PIDS`` that match the
instance name or None.
"""
cmd = self._instance_to_cmd(inst)
self.pu.get_proc_info(self.hostname, ".*" + cmd + ".*",
regexp=True)
_procs = self.pu.processes.values()
if _procs:
_pids = []
for _p in _procs:
_pids.extend(map(lambda x: x.pid, _p))
return _pids
return None
def _get_pid(self, inst):
"""
Get the ``PID`` associated to this instance.
Implementation note, the pid is read from the
daemon's lock file.
This is different than _all_instance_pids in that
the PID of the last running instance can be retrieved
with ``_get_pid`` but not with ``_all_instance_pids``
"""
priv = self._instance_to_privpath(inst)
lock = self._instance_to_lock(inst)
path = os.path.join(self.pbs_conf['PBS_HOME'], priv, lock)
rv = self.du.cat(self.hostname, path, sudo=True, logerr=False)
if ((rv['rc'] == 0) and (len(rv['out']) > 0)):
self.pid = rv['out'][0].strip()
else:
self.pid = None
return self.pid
def _start(self, inst=None, args=None, cmd_map=None, launcher=None):
"""
Generic service startup
:param inst: The instance to act upon
:type inst: str
:param args: Optional command-line arguments
:type args: List
:param cmd_map: Optional dictionary of command line
options to configuration variables
:type cmd_map: Dictionary
:param launcher: Optional utility to invoke the launch
of the service. This option only takes
effect on ``Unix/Linux``. The option can
be a string or a list.Options may be passed
to the launcher, for example to start a
service through the valgrind utility
redirecting to a log file,launcher could be
set to e.g.
``['valgrind', '--log-file=/tmp/vlgrd.out']``
or ``'valgrind --log-file=/tmp/vlgrd.out'``
"""
if launcher is None and self.launcher is not None:
launcher = self.launcher
app = self._instance_to_cmd(inst)
if app is None:
return
_m = ['service: starting', app]
if args is not None:
_m += ['with args: ']
_m += args
as_script = False
wait_on = True
if launcher is not None:
if isinstance(launcher, str):
launcher = launcher.split()
if app == 'pbs_server':
# running the pbs server through valgrind requires a bit of
# a dance because the pbs_server binary is pbs_server.bin
# and to run it requires being able to find libraries, so
# LD_LIBRARY_PATH is set and pbs_server.bin is run as a
# script
pexec = inst.pbs_conf['PBS_EXEC']
ldlib = ['LD_LIBRARY_PATH=' +
os.path.join(pexec, 'lib') + ':' +
os.path.join(pexec, 'pgsql', 'lib')]
app = 'pbs_server.bin'
else:
ldlib = []
cmd = ldlib + launcher
as_script = True
wait_on = False
else:
cmd = []
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', app)]
if args is not None:
cmd += args
if not self.default_pbs_conf:
cmd = ['PBS_CONF_FILE=' + inst.pbs_conf_file] + cmd
as_script = True
if cmd_map is not None:
conf_cmd = self.du.map_pbs_conf_to_cmd(cmd_map,
pconf=self.pbs_conf)
cmd.extend(conf_cmd)
_m += conf_cmd
self.logger.info(" ".join(_m))
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
as_script=as_script, wait_on_script=wait_on,
level=logging.INFOCLI, logerr=False)
if ret['rc'] != 0:
raise PbsServiceError(rv=False, rc=ret['rc'], msg=ret['err'])
ret_msg = True
if ret['err']:
ret_msg = ret['err']
self.pid = self._get_pid(inst)
# get_pid gets information from a lock file that may not have been
# removed when the daemon stopped so we verify that the PID is
# actually alive in the list of pids returned by ps
live_pids = self._all_instance_pids(inst)
i = 0
while ((self.pid is None) or
(live_pids is None or self.pid not in live_pids)) and (i < 30):
time.sleep(1)
i += 1
live_pids = self._all_instance_pids(inst)
self.pid = self._get_pid(inst)
if live_pids is not None and self.pid in live_pids:
return ret_msg
if i == 30:
raise PbsServiceError(rv=False, rc=-1, msg="Could not find PID")
return ret_msg
def _stop(self, sig='-TERM', inst=None):
if inst is None:
return True
self._signal(sig, inst)
pid = self._get_pid(inst)
chk_pid = self._all_instance_pids(inst)
if pid is None or chk_pid is None:
return True
num_seconds = 0
while (chk_pid is not None) and (str(pid) in chk_pid):
if num_seconds > 60:
m = (self.logprefix + 'could not stop service ' +
self._instance_to_servicename(inst))
raise PbsServiceError(rv=False, rc=-1, msg=m)
time.sleep(1)
num_seconds += 1
chk_pid = self._all_instance_pids(inst)
inst.pid = None
return True
def log_lines(self, logtype, id=None, n=50, tail=True, day=None,
starttime=None, endtime=None):
"""
Return the last ``<n>`` lines of a PBS log file, which
can be one of ``server``, ``scheduler``, ``MoM``, or
``tracejob``
:param logtype: The entity requested, an instance of a
Scheduler, Server or MoM object, or the
string 'tracejob' for tracejob
:type logtype: str or object
:param id: The id of the object to trace. Only used for
tracejob
        :param n: Either 'ALL' or the number of lines to
process/display, defaults to 50.
:type n: int
:param tail: if True, parse log from the end to the start,
otherwise parse from the start to the end.
Defaults to True.
:type tail: bool
        :param day: Optional day in ``YYYYMMDD`` format. Defaults
to current day
:type day: int
:param starttime: date timestamp to start matching
:param endtime: date timestamp to end matching
:returns: Last ``<n>`` lines of logfile for ``Server``,
``Scheduler``, ``MoM or tracejob``
"""
logval = None
lines = None
sudo = False
try:
if logtype == 'tracejob':
if id is None:
return None
cmd = [os.path.join(
self.pbs_conf['PBS_EXEC'],
'bin',
'tracejob')]
cmd += [str(id)]
lines = self.du.run_cmd(self.hostname, cmd)['out']
if n != 'ALL':
lines = lines[-n:]
else:
if day is None:
day = time.strftime("%Y%m%d", time.localtime(time.time()))
if logtype == 'accounting':
filename = os.path.join(self.pbs_conf['PBS_HOME'],
'server_priv', 'accounting', day)
sudo = True
else:
logval = self._instance_to_logpath(logtype)
if logval:
filename = os.path.join(self.pbs_conf['PBS_HOME'],
logval, day)
if n == 'ALL':
if self._is_local and not sudo:
lines = open(filename)
else:
lines = self.du.cat(self.hostname, filename, sudo=sudo,
level=logging.DEBUG2)['out']
# tail is not a standard, e.g. on Solaris it does not recognize
# -n. We circumvent this problem by using PTL's version of tail
# but it currently only works on a local host, for remote hosts
# we fall back to using tail/head -n
elif self._is_local and not sudo:
if tail:
futils = FileUtils(filename, FILE_TAIL)
else:
futils = FileUtils(filename)
lines = futils.next(n)
else:
if tail:
cmd = ['/usr/bin/tail']
else:
cmd = ['/usr/bin/head']
pyexec = os.path.join(self.pbs_conf['PBS_EXEC'], 'python',
'bin', 'python')
osflav = self.du.get_platform(self.hostname, pyexec)
if osflav.startswith('sunos'):
cmd += ['-']
else:
cmd += ['-n']
cmd += [str(n), filename]
lines = self.du.run_cmd(self.hostname, cmd, sudo=sudo,
level=logging.DEBUG2)['out']
except:
self.logger.error('error in log_lines ')
traceback.print_exc()
return None
return lines
def _log_match(self, logtype, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None, max_attempts=1,
interval=1, starttime=None, endtime=None,
level=logging.INFO):
"""
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x,y)`` where x is the matching line
number and y the line itself. If no match,return None.
If allmatch is True, a list of tuples is returned.
:param logtype: The entity requested, an instance of a
Scheduler, Server, or MoM object, or the
strings 'tracejob' for tracejob or
'accounting' for accounting logs.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True all matching lines out of the n
parsed are returned as a list. Defaults
to False
:type allmatch: bool
:param regexp: If true msg is a Python regular expression.
Defaults to False
        :type regexp: bool
        :param day: Optional day in YYYYMMDD format.
:param max_attempts: the number of attempts to make to find
a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the record
number, not the absolute line number in the file.
"""
try:
from ptl.utils.pbs_logutils import PBSLogUtils
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
if self.logutils is None:
self.logutils = PBSLogUtils()
rv = (None, None)
attempt = 1
name = self._instance_to_servicename(logtype)
infomsg = (name + ' ' + self.shortname +
' log match: searching for "' + msg + '"')
if regexp:
infomsg += ' - using regular expression '
if allmatch:
infomsg += ' - on all matches '
attemptmsg = ' - No match'
while attempt <= max_attempts:
if attempt > 1:
attemptmsg = ' - attempt ' + str(attempt)
lines = self.log_lines(logtype, id, n=n, tail=tail, day=day,
starttime=starttime, endtime=endtime)
rv = self.logutils.match_msg(lines, msg, allmatch=allmatch,
regexp=regexp, starttime=starttime,
endtime=endtime)
if rv:
self.logger.log(level, infomsg + '... OK')
break
else:
if ((starttime is not None or endtime is not None) and
n != 'ALL'):
                    if attempt == max_attempts:
# We will do one last attempt to match in case the
# number of lines that were provided did not capture
# the start or end time of interest
max_attempts += 1
n = 'ALL'
self.logger.log(level, infomsg + attemptmsg)
attempt += 1
time.sleep(interval)
try:
# Depending on whether the hostname is local or remote and whether
# sudo privileges were required, lines returned by log_lines can be
# an open file descriptor, we close here but ignore errors in case
# any were raised for all irrelevant cases
lines.close()
except:
pass
return rv
def accounting_match(self, msg, id=None, n=50, tail=True,
allmatch=False, regexp=False, day=None,
max_attempts=1, interval=1, starttime=None,
endtime=None):
"""
Find msg in accounting log.
If ``'msg'`` found in the ``'n'`` lines of the log file,
        returns a ``tuple (x,y)`` where x is the matching line
number and y the line itself. If no match,return None.
If allmatch is True, a list of tuples is returned.
:param id: The id of the object to trace. Only used for
tracejob
:param n: 'ALL' or the number of lines to search through,
defaults to 50
:type n: int
:param tail: If true (default), starts from the end of
the file
:type tail: bool
        :param allmatch: If True all matching lines out of the n
parsed are returned as a list. Defaults
to False
:type allmatch: bool
:param regexp: If true msg is a Python regular expression.
Defaults to False
:type regexp: bool
        :param day: Optional day in YYYYMMDD format.
:param max_attempts: the number of attempts to make to
find a matching entry
        :type max_attempts: int
:param interval: the interval between attempts
:type interval: int
:param starttime: If set ignore matches that occur before
specified time
:param endtime: If set ignore matches that occur after
specified time
.. note:: The matching line number is relative to the
record number, not the absolute line number
in the file.
"""
return self._log_match('accounting', msg, id, n, tail, allmatch,
regexp, day, max_attempts, interval, starttime,
endtime)
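    # Illustrative sketch (not part of the original library): typical use of
    # the log matching helpers from a Server/MoM/Scheduler instance. The
    # message and record are arbitrary examples.
    #
    #   server.accounting_match('E;123.server', n='ALL', max_attempts=5,
    #                           interval=2)
    #   # -> (line_number, matched_line), or None if no record matched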
def tracejob_match(self, msg, id=None, n=50, tail=True, allmatch=False,
regexp=False, **kwargs):
"""
Find msg in tracejob log. See _log_match for details
"""
        return self._log_match('tracejob', msg, id, n, tail, allmatch,
                               regexp, **kwargs)
def _save_config_file(self, dict_conf, fname):
ret = self.du.cat(self.hostname, fname, sudo=True)
if ret['rc'] == 0:
dict_conf[fname] = ret['out']
else:
self.logger.error('error saving configuration ' + fname)
def _load_configuration(self, infile, objtype=None):
"""
Load configuration as was saved in infile
:param infile: the file in which configuration
was saved
:type infile: str
:param objtype: the object type to load configuration
for, one of server, scheduler, mom or
if None, load all objects in infile
"""
if os.path.isfile(infile):
conf = {}
f = open(infile, 'r')
# load all objects from the Pickled file
while True:
try:
conf = cPickle.load(f)
except:
break
f.close()
if objtype and objtype in conf:
conf = conf[objtype]
else:
# load all object types that could be in infile
newconf = {}
for ky in [MGR_OBJ_SERVER, MGR_OBJ_SCHED, MGR_OBJ_NODE]:
if ky not in conf:
conf[ky] = {}
newconf = dict(newconf.items() + conf[ky].items())
conf = newconf
for k, v in conf.items():
(fd, fn) = self.du.mkstemp()
# handle server data saved as output of qmgr commands by piping
# data back into qmgr
if k.startswith('qmgr_'):
qmgr = os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qmgr')
os.write(fd, "\n".join(v))
self.du.run_cmd(
self.hostname,
[qmgr],
cstdin=fd,
sudo=True)
else:
os.write(fd, "\n".join(v))
# append the last line
os.write(fd, "\n")
self.du.run_cmd(self.hostname, ['cp', fn, k], sudo=True)
os.close(fd)
os.remove(fn)
return True
return False
def get_tempdir(self):
"""
platform independent call to get a temporary directory
"""
return self.du.get_tempdir(self.hostname)
def __str__(self):
return (self.__class__.__name__ + ' ' + self.hostname + ' config ' +
self.pbs_conf_file)
def __repr__(self):
return (self.__class__.__name__ + '/' + self.pbs_conf_file + '@' +
self.hostname)
class Comm(PBSService):
"""
PBS ``Comm`` configuration and control
"""
"""
:param name: The hostname of the Comm. Defaults to current hostname.
:type name: str
:param attrs: Dictionary of attributes to set, these will override
defaults.
:type attrs: dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc) to
mapped files from PBS diag directory
:type diagmap: dictionary
:param diag: path to PBS diag directory (This will override diagmap)
:type diag: str or None
:param server: A PBS server instance to which this Comm is associated
:type server: str
:param db_access: set to either file containing credentials to DB access or
dictionary containing {'dbname':...,'user':...,
'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diagmap, diag)
_m = ['Comm ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.conf_to_cmd_map = {
'PBS_COMM_ROUTERS': '-r',
'PBS_COMM_THREADS': '-t'
}
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
def isUp(self):
"""
Check for comm up
"""
return super(Comm, self)._isUp(self)
def signal(self, sig):
"""
Send signal to comm
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(Comm, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the comm pid
"""
return super(Comm, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids of given instance
"""
return super(Comm, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the comm
:param args: Argument required to start the comm
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Comm, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the comm.
:param sig: Signal to stop the comm
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Comm on host ' +
self.hostname)
return super(Comm, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_comm()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the comm.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the comm logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
class Server(PBSService):
"""
PBS server ``configuration`` and ``control``
The Server class is a container to PBS server attributes
and implements wrappers to the ``IFL API`` to perform
operations on the server. For example to submit, status,
delete, manage, etc... jobs, reservations and configurations.
This class also offers higher-level routines to ease testing,
see functions, for ``example: revert_to_defaults,
init_logging, expect, counter.``
The ptl_conf dictionary holds general configuration for the
framework's operations, specifically, one can control:
mode: set to ``PTL_CLI`` to operate in ``CLI`` mode or
``PTL_API`` to operate in ``API`` mode
    expect_max_attempts: the default maximum number of attempts
    to be used by expect. Defaults to 60
    expect_interval: the default time interval (in seconds)
    between expect requests. Defaults to 0.5
    update_attributes: the default on whether Object attributes
    should be updated using a list of dictionaries. Defaults
    to True
:param name: The hostname of the server. Defaults to
calling pbs_default()
:type name: str
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param defaults: Dictionary of default attributes.
Default: dflt_attributes
:type defaults: Dictionary
:param pbsconf_file: path to config file to parse for PBS_HOME,
PBS_EXEC, etc
:type pbsconf_file: str
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This will override
                 diagmap)
:type diag: str
:param client: The host to use as client for CLI queries.
Defaults to the local hostname.
:type client: str
:param client_pbsconf_file: The path to a custom PBS_CONF_FILE
on the client host. Defaults to
the same path as pbsconf_file.
:type client_pbsconf_file: str
    :param db_access: set to either file containing credentials
                      to DB access or dictionary containing
                      {'dbname':...,'user':...,'port':...}
:param stat: if True, stat the server attributes
:type stat: bool
"""
logger = logging.getLogger(__name__)
dflt_attributes = {
ATTR_scheduling: "True",
ATTR_dfltque: "workq",
ATTR_logevents: "511",
ATTR_mailfrom: "adm",
ATTR_queryother: "True",
ATTR_rescdflt + ".ncpus": "1",
ATTR_DefaultChunk + ".ncpus": "1",
ATTR_schedit: "600",
ATTR_ResvEnable: "True",
ATTR_nodefailrq: "310",
ATTR_maxarraysize: "10000",
ATTR_license_linger: "3600",
ATTR_EligibleTimeEnable: "False",
ATTR_max_concurrent_prov: "5",
ATTR_FlatUID: 'True',
}
ptl_conf = {
'mode': PTL_API,
'expect_max_attempts': 60,
'expect_interval': 0.5,
'update_attributes': True,
}
# this pattern is a bit relaxed to match common developer build numbers
version_tag = re.compile("[a-zA-Z_]*(?P<version>[\d\.]+.[\w\d\.]*)[\s]*")
actions = ExpectActions()
def __init__(self, name=None, attrs={}, defaults={}, pbsconf_file=None,
diagmap={}, diag=None, client=None, client_pbsconf_file=None,
db_access=None, stat=True):
self.jobs = {}
self.nodes = {}
self.reservations = {}
self.queues = {}
self.resources = {}
self.hooks = {}
self.pbshooks = {}
self.entities = {}
self.scheduler = None
self.version = None
self.default_queue = None
self.last_error = [] # type: array. Set for CLI IFL errors. Not reset
self.last_rc = None # Set for CLI IFL return code. Not thread-safe
        # default timeout on connect/disconnect set to 60s to mimic the qsub
# buffer introduced in PBS 11
self._conn_timeout = 60
self._conn_timer = None
self._conn = None
self._db_conn = None
self.current_user = pwd.getpwuid(os.getuid())[0]
if len(defaults.keys()) == 0:
defaults = self.dflt_attributes
self.pexpect_timeout = 15
self.pexpect_sleep_time = .1
PBSService.__init__(self, name, attrs, defaults, pbsconf_file, diagmap,
diag)
_m = ['server ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.set_client(client)
if client_pbsconf_file is None:
self.client_pbs_conf_file = self.du.get_pbs_conf_file(self.client)
else:
self.client_pbs_conf_file = client_pbsconf_file
self.client_conf = self.du.parse_pbs_config(
self.client, file=self.client_pbs_conf_file)
if self.client_pbs_conf_file == '/etc/pbs.conf':
self.default_client_pbs_conf = True
elif (('PBS_CONF_FILE' not in os.environ) or
(os.environ['PBS_CONF_FILE'] != self.client_pbs_conf_file)):
self.default_client_pbs_conf = False
else:
self.default_client_pbs_conf = True
a = {}
if os.getuid() == 0:
a = {ATTR_aclroot: 'root'}
self.dflt_attributes.update(a)
if not API_OK:
# mode must be set before the first stat call
self.set_op_mode(PTL_CLI)
if stat:
try:
tmp_attrs = self.status(SERVER, level=logging.DEBUG,
db_access=db_access)
except (PbsConnectError, PbsStatusError):
tmp_attrs = None
if tmp_attrs is not None and len(tmp_attrs) > 0:
self.attributes = tmp_attrs[0]
if ATTR_dfltque in self.attributes:
self.default_queue = self.attributes[ATTR_dfltque]
self.update_version_info()
def update_version_info(self):
"""
Update the version information.
"""
if ATTR_version not in self.attributes:
self.attributes[ATTR_version] = 'unknown'
else:
m = self.version_tag.match(self.attributes[ATTR_version])
if m:
v = m.group('version')
self.version = LooseVersion(v)
self.logger.info(self.logprefix + 'version ' +
self.attributes[ATTR_version])
@classmethod
def set_update_attributes(cls, val):
"""
Set update attributes
"""
cls.logger.info('setting update attributes ' + str(val))
if val == 1 or val[0] in ('t', 'T'):
val = True
else:
val = False
cls.ptl_conf['update_attributes'] = val
@classmethod
def set_expect_max_attempts(cls, val):
"""
Set expect max attempts
"""
cls.logger.info('setting expect max attempts ' + str(val))
cls.ptl_conf['expect_max_attempts'] = int(val)
@classmethod
def set_expect_interval(cls, val):
"""
Set expect interval
"""
cls.logger.info('setting expect interval ' + str(val))
cls.ptl_conf['expect_interval'] = float(val)
def set_client(self, name=None):
"""
Set server client
:param name: Client name
:type name: str
"""
if name is None:
self.client = socket.gethostname()
else:
self.client = name
def _connect(self, hostname, attempt=1):
if ((self._conn is None or self._conn < 0) or
(self._conn_timeout == 0 or self._conn_timer is None)):
self._conn = pbs_connect(hostname)
self._conn_timer = time.time()
if self._conn is None or self._conn < 0:
if attempt > 5:
m = self.logprefix + 'unable to connect'
raise PbsConnectError(rv=None, rc=-1, msg=m)
else:
self._disconnect(self._conn, force=True)
time.sleep(1)
return self._connect(hostname, attempt + 1)
return self._conn
def _disconnect(self, conn, force=False):
"""
disconnect a connection to a Server.
For performance of the API calls, a connection is
maintained up to _conn_timer, unless the force parameter
is set to True
:param conn: Server connection
:param force: If true then diconnect forcefully
:type force: bool
"""
if ((conn is not None and conn >= 0) and
(force or
(self._conn_timeout == 0 or
(self._conn_timer is not None and
(time.time() - self._conn_timer > self._conn_timeout))))):
pbs_disconnect(conn)
self._conn_timer = None
self._conn = None
def set_connect_timeout(self, timeout=0):
"""
Set server connection timeout
:param timeout: Timeout value
:type timeout: int
"""
self._conn_timeout = timeout
def get_op_mode(self):
"""
Returns operating mode for calls to the PBS server.
Currently, two modes are supported, either the ``API``
or the ``CLI``. Default is ``API``
"""
if (not API_OK or (self.ptl_conf['mode'] == PTL_CLI)):
return PTL_CLI
return PTL_API
def set_op_mode(self, mode):
"""
set operating mode to one of either ``PTL_CLI`` or
``PTL_API``.Returns the mode that was set which can
be different from the value requested, for example, if
requesting to set ``PTL_API``, in the absence of the
appropriate SWIG wrappers, the library will fall back to
``CLI``, or if requesting ``PTL_CLI`` and there is no
``PBS_EXEC`` on the system, None is returned.
:param mode: Operating mode
:type mode: str
"""
if mode == PTL_API:
if self._conn is not None or self._conn < 0:
self._conn = None
if not API_OK:
self.logger.error(self.logprefix +
'API submission is not available')
return PTL_CLI
elif mode == PTL_CLI:
if ((not self.has_diag) and
not os.path.isdir(os.path.join(self.client_conf['PBS_EXEC'],
'bin'))):
self.logger.error(self.logprefix +
'PBS commands are not available')
return None
else:
self.logger.error(self.logprefix + "Unrecognized operating mode")
return None
self.ptl_conf['mode'] = mode
self.logger.info(self.logprefix + 'server operating mode set to ' +
mode)
return mode
def add_expect_action(self, name=None, action=None):
"""
Add an action handler to expect. Expect Actions are
custom handlers that are triggered when an unexpected
value is encountered
:param name: Action name
:type name: str or None
:param action: Action to add
"""
if name is None and action.name is None:
return
if name is None and action.name is not None:
name = action.name
if not self.actions.has_action(name):
self.actions.add_action(action, self.shortname)
def set_attributes(self, a={}):
"""
set server attributes
:param a: Attribute dictionary
:type a: Dictionary
"""
super(Server, self).set_attributes(a)
self.__dict__.update(a)
def isUp(self):
"""
returns ``True`` if server is up and ``False`` otherwise
"""
if self.has_diag:
return True
i = 0
op_mode = self.get_op_mode()
if ((op_mode == PTL_API) and (self._conn is not None)):
self._disconnect(self._conn, force=True)
while i < 20:
rv = False
try:
if op_mode == PTL_CLI:
self.status(SERVER, level=logging.DEBUG, logerr=False)
else:
c = self._connect(self.hostname)
self._disconnect(c, force=True)
return True
except (PbsConnectError, PbsStatusError):
# if the status/connect operation fails then there might be
# chances that server process is running but not responsive
# so we wait until the server is reported operational.
rv = self._isUp(self)
# We really mean to check != False rather than just "rv"
if str(rv) != 'False':
                    self.logger.warning('Server process started ' +
                                        'but not up yet')
time.sleep(1)
i += 1
else:
# status/connect failed + no server process means
# server is actually down
return False
return False
def signal(self, sig):
"""
Send signal to server
:param sig: Signal to send
:type sig: str
"""
self.logger.info('server ' + self.shortname + ': sent signal ' + sig)
return super(Server, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the server pid
"""
return super(Server, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get all pids for a given instance
"""
return super(Server, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS server
:param args: Argument required to start the server
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
rv = super(Server, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
if self.isUp():
return rv
else:
raise PbsServiceError(rv=False, rc=1, msg=rv['err'])
def stop(self, sig=None):
"""
Stop the PBS server
:param sig: Signal to stop PBS server
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Server on host ' +
self.hostname)
rc = super(Server, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_server()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg,
post=self._disconnect, conn=self._conn,
force=True)
rc = True
self._disconnect(self._conn, force=True)
return rc
def restart(self):
"""
Terminate and start a PBS server.
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the PBS server logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp,
day, max_attempts, interval, starttime, endtime,
level=level)
def revert_to_defaults(self, reverthooks=True, revertqueues=True,
revertresources=True, delhooks=True,
delqueues=True, server_stat=None):
"""
reset server attributes back to out of box defaults.
:param reverthooks: If True disable all hooks. Defaults
to True
:type reverthooks: bool
:param revertqueues: If True disable all non-default
queues. Defaults to True
:type revertqueues: bool
:param revertresources: If True, resourcedef file is
removed. Defaults to True.
Reverting resources causes a server
restart to occur.
:type revertresources: bool
:param delhooks: If True, hooks are deleted, if deletion
fails, fall back to reverting hooks. Defaults
to True.
:type delhooks: bool
:param delqueues: If True, all non-default queues are deleted,
will attempt to delete all jobs first, if it
fails, revertqueues will be honored,
                          otherwise, revertqueues is ignored. Defaults
to True
:type delqueues: bool
:returns: True upon success and False if an error is
encountered.
:raises: PbsStatusError or PbsManagerError
"""
ignore_attrs = ['id', 'pbs_license', ATTR_NODE_ProvisionEnable]
ignore_attrs += [ATTR_status, ATTR_total, ATTR_count]
ignore_attrs += [ATTR_rescassn, ATTR_FLicenses, ATTR_SvrHost]
ignore_attrs += [ATTR_license_count, ATTR_version, ATTR_managers]
ignore_attrs += [ATTR_pbs_license_info]
unsetlist = []
setdict = {}
self.logger.info(self.logprefix +
'reverting configuration to defaults')
self.cleanup_jobs_and_reservations()
if server_stat is None:
server_stat = self.status(SERVER, level=logging.DEBUG)[0]
for k in server_stat.keys():
if (k in ignore_attrs) or (k in self.dflt_attributes.keys()):
continue
elif (('.' in k) and (k.split('.')[0] in ignore_attrs)):
continue
else:
unsetlist.append(k)
if len(unsetlist) != 0:
self.manager(MGR_CMD_UNSET, MGR_OBJ_SERVER, unsetlist)
for k in self.dflt_attributes.keys():
if(k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if delhooks:
reverthooks = False
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
if len(hooks) > 0:
self.manager(MGR_CMD_DELETE, HOOK, id=hooks, expect=True)
if delqueues:
revertqueues = False
queues = self.status(QUEUE, level=logging.DEBUG)
queues = [q['id'] for q in queues]
if len(queues) > 0:
self.manager(MGR_CMD_DELETE, QUEUE, id=queues, expect=True)
a = {ATTR_qtype: 'Execution',
ATTR_enable: 'True',
ATTR_start: 'True'}
self.manager(MGR_CMD_CREATE, QUEUE, a, id='workq', expect=True)
setdict.update({ATTR_dfltque: 'workq'})
if reverthooks:
hooks = self.status(HOOK, level=logging.DEBUG)
hooks = [h['id'] for h in hooks]
a = {ATTR_enable: 'false'}
if len(hooks) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_HOOK, a, hooks, expect=True)
if revertqueues:
self.status(QUEUE, level=logging.DEBUG)
queues = []
for (qname, qobj) in self.queues.items():
                # skip reservation queues. This syntax is kept for Python 2.4
                # compatibility
if (qname.startswith('R') or qname.startswith('S') or
qname == server_stat[ATTR_dfltque]):
continue
qobj.revert_to_defaults()
queues.append(qname)
a = {ATTR_enable: 'false'}
self.manager(MGR_CMD_SET, QUEUE, a, id=queues, expect=True)
a = {ATTR_enable: 'True', ATTR_start: 'True'}
self.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, a,
id=server_stat[ATTR_dfltque], expect=True)
if len(setdict) > 0:
self.manager(MGR_CMD_SET, MGR_OBJ_SERVER, setdict)
if revertresources:
try:
rescs = self.status(RSC)
rescs = [r['id'] for r in rescs]
except:
rescs = []
if len(rescs) > 0:
self.manager(MGR_CMD_DELETE, RSC, id=rescs, expect=True)
return True
def save_configuration(self, outfile, mode='a'):
"""
Save a server configuration, this includes:
- ``server_priv/resourcedef``
- ``qmgr -c "print server"``
- ``qmgr -c "print sched"``
- ``qmgr -c "print hook"``
        :param outfile: the output file to which configuration is
                        saved
:type outfile: str
:param mode: The mode in which to open outfile to save
configuration. The first object being saved
should open this file with 'w' and subsequent
calls from other objects should save with
                     mode 'a' or 'a+'. Defaults to 'a'
:type mode: str
:returns: True on success, False on error
"""
conf = {}
sconf = {MGR_OBJ_SERVER: conf}
rd = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
self._save_config_file(conf, rd)
qmgr = os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmgr')
ret = self.du.run_cmd(self.client, [qmgr, '-c', 'p s'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_server'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p sched'],
sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_sched'] = ret['out']
ret = self.du.run_cmd(self.hostname, [qmgr, '-c', 'p h'], sudo=True)
if ret['rc'] != 0:
return False
else:
conf['qmgr_print_hook'] = ret['out']
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('Error processing file ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file ``infile``
"""
self.revert_to_defaults()
self._load_configuration(infile, MGR_OBJ_SERVER)
def get_hostname(self):
"""
return the default server hostname
"""
if self.get_op_mode() == PTL_CLI:
return self.hostname
return pbs_default()
def _db_connect(self, db_access=None):
if self._db_conn is None:
if 'user' not in db_access or\
'password' not in db_access:
self.logger.error('missing credentials to access DB')
return None
if 'dbname' not in db_access:
db_access['dbname'] = 'pbs_datastore'
if 'port' not in db_access:
db_access['port'] = '15007'
if 'host' not in db_access:
db_access['host'] = self.hostname
user = db_access['user']
dbname = db_access['dbname']
port = db_access['port']
password = db_access['password']
host = db_access['host']
cred = "host=%s dbname=%s user=%s password=%s port=%s" % \
(host, dbname, user, password, port)
self._db_conn = psycopg2.connect(cred)
return self._db_conn
def _db_server_host(self, cur=None, db_access=None):
"""
Get the server host name from the database. The server
host name is stored in the pbs.server table and not in
pbs.server_attr.
:param cur: Optional, a predefined cursor to use to
operate on the DB
        :param db_access: set to either file containing
                          credentials to DB access or
                          dictionary containing
                          ``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
local_init = False
if cur is None:
conn = self._db_connect(db_access)
local_init = True
if conn is None:
return None
cur = conn.cursor()
# obtain server name. The server hostname is stored in table
# pbs.server
cur.execute('SELECT sv_hostname from pbs.server')
if local_init:
conn.commit()
tmp_query = cur.fetchone()
if len(tmp_query) > 0:
svr_host = tmp_query[0]
else:
svr_host = "unknown"
return svr_host
def status_db(self, obj_type=None, attrib=None, id=None, db_access=None,
logerr=True):
"""
Status PBS objects from the SQL database
        :param obj_type: The type of object to query, one of the
                         * objects. Default: SERVER
        :param attrib: Attributes to query, can be a string, a list,
                       or a dictionary. Default: None. All attributes
                       will be queried
:type attrib: str or list or dictionary
:param id: An optional identifier, the name of the object
to status
:type id: str
:param db_access: information needed to access the database,
can be either a file containing user,
port, dbname, password info or a
dictionary of key/value entries
:type db_access: str or dictionary
"""
if not PSYCOPG:
self.logger.error('psycopg module unavailable, install from ' +
'http://initd.org/psycopg/ and retry')
return None
if not isinstance(db_access, dict):
try:
f = open(db_access, 'r')
except IOError:
self.logger.error('Unable to access ' + db_access)
return None
lines = f.readlines()
db_access = {}
for line in lines:
(k, v) = line.split('=')
db_access[k] = v
conn = self._db_connect(db_access)
if conn is None:
return None
cur = conn.cursor()
stmt = []
if obj_type == SERVER:
stmt = ["SELECT sv_name,attr_name,attr_resource,attr_value " +
"FROM pbs.server_attr"]
svr_host = self.hostname # self._db_server_host(cur)
elif obj_type == SCHED:
stmt = ["SELECT sched_name,attr_name,attr_resource,attr_value " +
"FROM pbs.scheduler_attr"]
# reuse server host name for sched host
svr_host = self.hostname
elif obj_type == JOB:
stmt = ["SELECT ji_jobid,attr_name,attr_resource,attr_value " +
"FROM pbs.job_attr"]
if id:
id_stmt = ["ji_jobid='" + id + "'"]
elif obj_type == QUEUE:
stmt = ["SELECT qu_name,attr_name,attr_resource,attr_value " +
"FROM pbs.queue_attr"]
if id:
id_stmt = ["qu_name='" + id + "'"]
elif obj_type == RESV:
stmt = ["SELECT ri_resvid,attr_name,attr_resource,attr_value " +
"FROM pbs.resv_attr"]
if id:
id_stmt = ["ri_resvid='" + id + "'"]
elif obj_type in (NODE, VNODE):
stmt = ["SELECT nd_name,attr_name,attr_resource,attr_value " +
"FROM pbs.node_attr"]
if id:
id_stmt = ["nd_name='" + id + "'"]
else:
self.logger.error('status: object type not handled')
return None
if attrib or id:
stmt += ["WHERE"]
extra_stmt = []
if attrib:
if isinstance(attrib, dict):
attrs = attrib.keys()
elif isinstance(attrib, list):
attrs = attrib
elif isinstance(attrib, str):
attrs = attrib.split(',')
for a in attrs:
extra_stmt += ["attr_name='" + a + "'"]
stmt += [" OR ".join(extra_stmt)]
if id:
stmt += [" AND ", " AND ".join(id_stmt)]
exec_stmt = " ".join(stmt)
self.logger.debug('server: executing db statement: ' + exec_stmt)
cur.execute(exec_stmt)
conn.commit()
_results = cur.fetchall()
obj_dict = {}
for _res in _results:
if obj_type in (SERVER, SCHED):
obj_name = svr_host
else:
obj_name = _res[0]
if obj_name not in obj_dict:
obj_dict[obj_name] = {'id': obj_name}
attr = _res[1]
if _res[2]:
attr += '.' + _res[2]
obj_dict[obj_name][attr] = _res[3]
return obj_dict.values()
#
# Begin IFL Wrappers
#
def status(self, obj_type=SERVER, attrib=None, id=None,
extend=None, level=logging.INFO, db_access=None, runas=None,
resolve_indirectness=False, logerr=True):
"""
        Stat any PBS object ``[queue, server, node, hook, job,
        resv, sched]``. If the Server is set up from diag input,
        see diag or diagmap member, the status calls are routed
        directly to the data on files from diag.
        The server can be queried either through the 'qstat'
        command line tool or through the wrapped PBS IFL api,
        see set_op_mode.
        Return a dictionary representation of a batch status object;
        raises ``PbsStatusError`` on error.
        :param obj_type: The type of object to query, one of the *
                         objects. Default: SERVER
        :param attrib: Attributes to query, can be a string, a
                       list, or a dictionary. Default is to query all
                       attributes.
:type attrib: str or list or dictionary
:param id: An optional id, the name of the object to status
:type id: str
:param extend: Optional extension to the IFL call
:param level: The logging level, defaults to INFO
:type level: str
        :param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param runas: run stat as user
:type runas: str
:param resolve_indirectness: If True resolves indirect node
resources values
:type resolve_indirectness: bool
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
        In addition to the standard IFL stat call, this wrapper handles
        a few cases that aren't implicitly offered by pbs_stat*;
        those are for Hooks, Resources, and a formula evaluation.
"""
prefix = 'status on ' + self.shortname
if runas:
prefix += ' as ' + str(runas)
prefix += ': '
self.logit(prefix, obj_type, attrib, id, level)
bs = None
bsl = []
freebs = False
# 2 - Special handling for gathering the job formula value.
if attrib is not None and PTL_FORMULA in attrib:
if (((isinstance(attrib, list) or isinstance(attrib, dict)) and
(len(attrib) == 1)) or
(isinstance(attrib, str) and len(attrib.split(',')) == 1)):
bsl = self.status(
JOB, 'Resource_List.select', id=id, extend='t')
if self.scheduler is None:
self.scheduler = Scheduler(self.hostname)
if 'log_filter' in self.scheduler.sched_config:
_prev_filter = self.scheduler.sched_config['log_filter']
if int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': 2048})
self.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'})
if id is None:
_formulas = self.scheduler.job_formula()
else:
_formulas = {id: self.scheduler.job_formula(jobid=id)}
if not int(_prev_filter) & 2048:
self.scheduler.set_sched_config(
{'log_filter': int(_prev_filter)})
if len(bsl) == 0:
bsl = [{'id': id}]
for _b in bsl:
if _b['id'] in _formulas:
_b[PTL_FORMULA] = _formulas[_b['id']]
return bsl
# 3- Serve data from database if requested... and available for the
# given object type
if db_access and obj_type in (SERVER, SCHED, NODE, QUEUE, RESV, JOB):
bsl = self.status_db(obj_type, attrib, id, db_access=db_access,
logerr=logerr)
# 4- Serve data from diag files
elif obj_type in self.diagmap:
if obj_type in (HOOK, PBS_HOOK):
for f in self.diagmap[obj_type]:
_b = self.utils.file_to_dictlist(f, attrib)
if _b and 'hook_name' in _b[0]:
_b[0]['id'] = _b[0]['hook_name']
else:
_b[0]['id'] = os.path.basename(f)
if id is None or id == _b[0]['id']:
bsl.extend(_b)
else:
bsl = self.utils.file_to_dictlist(self.diagmap[obj_type],
attrib, id=id)
# 6- Stat using PBS CLI commands
elif self.get_op_mode() == PTL_CLI:
tgt = self.client
if obj_type in (JOB, QUEUE, SERVER):
pcmd = [os.path.join(
self.client_conf['PBS_EXEC'],
'bin',
'qstat')]
if extend:
pcmd += ['-' + extend]
if obj_type == JOB:
pcmd += ['-f']
if id:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == QUEUE:
pcmd += ['-Qf']
if id:
if '@' not in id:
pcmd += [id + '@' + self.hostname]
else:
pcmd += [id]
else:
pcmd += ['@' + self.hostname]
elif obj_type == SERVER:
pcmd += ['-Bf', self.hostname]
elif obj_type in (NODE, VNODE, HOST):
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbsnodes')]
pcmd += ['-s', self.hostname]
if obj_type in (NODE, VNODE):
pcmd += ['-v']
if obj_type == HOST:
pcmd += ['-H']
if id:
pcmd += [id]
else:
pcmd += ['-a']
elif obj_type == RESV:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rstat')]
pcmd += ['-f']
if id:
pcmd += [id]
elif obj_type in (SCHED, PBS_HOOK, HOOK, RSC):
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib, id,
runas=runas, level=level, logerr=logerr)
except PbsManagerError, e:
rc = e.rc
# PBS bug, no hooks yields a return code of 1, we ignore
if obj_type != HOOK:
raise PbsStatusError(
rc=rc, rv=[], msg=self.geterrmsg())
if rc == 0:
if obj_type == HOOK:
o = self.hooks
elif obj_type == PBS_HOOK:
o = self.pbshooks
elif obj_type == SCHED:
if self.scheduler is None:
return []
o = {'sched': self.scheduler}
elif obj_type == RSC:
o = self.resources
if id:
if id in o:
return [o[id].attributes]
else:
return None
return [h.attributes for h in o.values()]
return []
else:
self.logger.error(self.logprefix + "unrecognized object type")
raise PbsStatusError(rc=-1, rv=[],
msg="unrecognized object type")
return None
# as_script is used to circumvent some shells that will not pass
# along environment variables when invoking a command through sudo
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif obj_type == RESV and not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(tgt, pcmd, runas=runas, as_script=as_script,
level=logging.INFOCLI, logerr=logerr)
o = ret['out']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if ret['rc'] != 0:
raise PbsStatusError(rc=ret['rc'], rv=[], msg=self.geterrmsg())
bsl = self.utils.convert_to_dictlist(o, attrib, mergelines=True)
# 7- Stat with impersonation over PBS IFL swig-wrapped API
elif runas is not None:
_data = {'obj_type': obj_type, 'attrib': attrib, 'id': id}
bsl = self.pbs_api_as('status', user=runas, data=_data,
extend=extend)
else:
# 8- Stat over PBS IFL API
#
# resources are special attributes, all resources are queried as
# a single attribute.
# e.g. querying the resources_available attribute returns all
# resources such as ncpus, mem etc. when querying for
# resources_available.ncpus and resources_available.mem only query
# resources_available once and retrieve the resources desired from
# there
if isinstance(attrib, dict):
attribcopy = {}
restype = []
for k, v in attrib.items():
if isinstance(v, tuple):
# SET requires a special handling because status may
# have been called through counter to count the number
# of objects have a given attribute set, in this case
# we set the attribute to an empty string rather than
# the number of elements requested. This is a
# side-effect of the way pbs_statjob works
if v[0] in (SET, MATCH_RE):
v = ''
else:
v = v[1]
if callable(v):
v = ''
if '.' in k:
_r = k.split('.')[0]
if _r not in restype:
attribcopy[k] = v
restype.append(_r)
else:
attribcopy[k] = v
elif isinstance(attrib, list):
attribcopy = []
for k in attrib:
if '.' in k:
_found = False
for _e in attribcopy:
_r = k.split('.')[0]
if _r == _e.split('.')[0]:
_found = True
break
if not _found:
attribcopy.append(k)
else:
attribcopy.append(k)
else:
attribcopy = attrib
a = self.utils.convert_to_attrl(attribcopy)
c = self._connect(self.hostname)
if obj_type == JOB:
bs = pbs_statjob(c, id, a, extend)
elif obj_type == QUEUE:
bs = pbs_statque(c, id, a, extend)
elif obj_type == SERVER:
bs = pbs_statserver(c, a, extend)
elif obj_type == HOST:
bs = pbs_statnode(c, id, a, extend)
elif obj_type == VNODE:
bs = pbs_statvnode(c, id, a, extend)
elif obj_type == RESV:
bs = pbs_statresv(c, id, a, extend)
elif obj_type == SCHED:
bs = pbs_statsched(c, a, extend)
elif obj_type == RSC:
# up to PBSPro 12.3 pbs_statrsc was not in pbs_ifl.h
bs = pbs_statrsc(c, id, a, extend)
elif obj_type in (HOOK, PBS_HOOK):
if os.getuid() != 0:
try:
rc = self.manager(MGR_CMD_LIST, obj_type, attrib,
id, level=level)
if rc == 0:
if id:
if (obj_type == HOOK and
id in self.hooks):
return [self.hooks[id].attributes]
elif (obj_type == PBS_HOOK and
id in self.pbshooks):
return [self.pbshooks[id].attributes]
else:
return None
if obj_type == HOOK:
return [h.attributes for h in
self.hooks.values()]
elif obj_type == PBS_HOOK:
return [h.attributes for h in
self.pbshooks.values()]
except:
pass
else:
bs = pbs_stathook(c, id, a, extend)
else:
self.logger.error(self.logprefix +
"unrecognized object type " + str(obj_type))
freebs = True
err = self.geterrmsg()
self._disconnect(c)
if err:
raise PbsStatusError(rc=-1, rv=[], msg=err)
if not isinstance(bs, list):
bsl = self.utils.batch_status_to_dictlist(bs, attrib)
else:
bsl = self.utils.filter_batch_status(bs, attrib)
# Update each object's dictionary with corresponding attributes and
# values
self.update_attributes(obj_type, bsl)
# Hook stat is done through CLI, no need to free the batch_status
if (not isinstance(bs, list) and freebs and
obj_type not in (HOOK, PBS_HOOK) and os.getuid() != 0):
pbs_statfree(bs)
# 9- Resolve indirect resources
if obj_type in (NODE, VNODE) and resolve_indirectness:
nodes = {}
for _b in bsl:
for k, v in _b.items():
if v.startswith('@'):
if v[1:] in nodes:
_b[k] = nodes[v[1:]][k]
else:
for l in bsl:
if l['id'] == v[1:]:
nodes[k] = l[k]
_b[k] = l[k]
break
del nodes
return bsl
def submit_interactive_job(self, job, cmd):
"""
submit an ``interactive`` job. Returns a job identifier
or raises PbsSubmitError on error
:param cmd: The command to run to submit the interactive
job
:type cmd: str
:param job: the job object. The job must have the attribute
'interactive_job' populated. That attribute is
a list of tuples of the form:
(<command>, <expected output>, <...>)
for example to send the command
hostname and expect 'myhost.mydomain' one would
set:job.interactive_job =
[('hostname', 'myhost.mydomain')]
                    If more than one line is expected, the additional
                    lines are appended to the tuple.
:raises: PbsSubmitError
"""
ij = InteractiveJob(job, cmd, self.hostname)
# start the interactive job submission thread and wait to pickup the
# actual job identifier
ij.start()
while ij.jobid is None:
continue
return ij.jobid
def submit(self, obj, script=None, extend=None, submit_dir=None):
"""
Submit a job or reservation. Returns a job identifier
or raises PbsSubmitError on error
:param obj: The Job or Reservation instance to submit
        :param script: Path to a script to submit. Default: None, in
                       which case an executable /bin/sleep 100 is
                       submitted
:type script: str or None
:param extend: Optional extension to the IFL call.
see pbs_ifl.h
:type extend: str or None
:param submit_dir: directory from which job is submitted.
Defaults to temporary directory
:type submit_dir: str or None
:raises: PbsSubmitError
"""
_interactive_job = False
as_script = False
rc = None
if isinstance(obj, Job):
if script is None and obj.script is not None:
script = obj.script
if ATTR_inter in obj.attributes:
_interactive_job = True
if ATTR_executable in obj.attributes:
del obj.attributes[ATTR_executable]
if ATTR_Arglist in obj.attributes:
del obj.attributes[ATTR_Arglist]
elif not isinstance(obj, Reservation):
m = self.logprefix + "unrecognized object type"
self.logger.error(m)
return None
if submit_dir is None:
submit_dir = tempfile.gettempdir()
cwd = os.getcwd()
os.chdir(submit_dir)
c = None
# 1- Submission using the command line tools
if self.get_op_mode() == PTL_CLI:
exclude_attrs = [] # list of attributes to not convert to CLI
if isinstance(obj, Job):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qsub')]
elif isinstance(obj, Reservation):
runcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rsub')]
if ATTR_resv_start in obj.custom_attrs:
start = obj.custom_attrs[ATTR_resv_start]
obj.custom_attrs[ATTR_resv_start] = \
self.utils.convert_seconds_to_resvtime(start)
if ATTR_resv_end in obj.custom_attrs:
end = obj.custom_attrs[ATTR_resv_end]
obj.custom_attrs[ATTR_resv_end] = \
self.utils.convert_seconds_to_resvtime(end)
if ATTR_resv_timezone in obj.custom_attrs:
exclude_attrs += [ATTR_resv_timezone, ATTR_resv_standing]
# handling of impersonation differs widely across OS's,
# when setting PBS_TZID we standardize on running the cmd
# as a script instead of customizing for each OS flavor
_tz = obj.custom_attrs[ATTR_resv_timezone]
runcmd = ['PBS_TZID=' + _tz] + runcmd
as_script = True
if ATTR_resv_rrule in obj.custom_attrs:
_rrule = obj.custom_attrs[ATTR_resv_rrule]
if _rrule[0] not in ("'", '"'):
_rrule = "'" + _rrule + "'"
obj.custom_attrs[ATTR_resv_rrule] = _rrule
if not self._is_local:
if ATTR_queue not in obj.attributes:
runcmd += ['-q@' + self.hostname]
elif '@' not in obj.attributes[ATTR_queue]:
curq = obj.attributes[ATTR_queue]
runcmd += ['-q' + curq + '@' + self.hostname]
if obj.custom_attrs and (ATTR_queue in obj.custom_attrs):
del obj.custom_attrs[ATTR_queue]
_conf = self.default_client_pbs_conf
cmd = self.utils.convert_to_cli(obj.custom_attrs, IFL_SUBMIT,
self.hostname, dflt_conf=_conf,
exclude_attrs=exclude_attrs)
if cmd is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += cmd
if script:
runcmd += [script]
else:
if ATTR_executable in obj.attributes:
runcmd += ['--', obj.attributes[ATTR_executable]]
if ((ATTR_Arglist in obj.attributes) and
(obj.attributes[ATTR_Arglist] is not None)):
args = obj.attributes[ATTR_Arglist]
arglist = self.utils.convert_arglist(args)
if arglist is None:
try:
os.chdir(cwd)
except OSError:
pass
return None
runcmd += [arglist]
if obj.username != self.current_user:
runas = obj.username
else:
runas = None
if _interactive_job:
ijid = self.submit_interactive_job(obj, runcmd)
try:
os.chdir(cwd)
except OSError:
pass
return ijid
if not self.default_client_pbs_conf:
runcmd = [
'PBS_CONF_FILE=' + self.client_pbs_conf_file] + runcmd
as_script = True
ret = self.du.run_cmd(self.client, runcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=False)
if ret['rc'] != 0:
objid = None
else:
objid = ret['out'][0]
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc = ret['rc']
# 2- Submission with impersonation over API
elif obj.username != self.current_user:
            # submitting a job as another user requires setting uid to that
            # user. It's done in a separate process
obj.set_variable_list(obj.username, submit_dir)
obj.set_attributes()
if (obj.script is not None and not self._is_local):
# This copy assumes that the file system layout on the
# remote host is identical to the local host. When not
# the case, this code will need to be updated to copy
# to a known remote location and update the obj.script
self.du.run_copy(self.hostname, obj.script, obj.script)
os.remove(obj.script)
objid = self.pbs_api_as('submit', obj, user=obj.username,
extend=extend)
# 3- Submission as current user over API
else:
c = self._connect(self.hostname)
if isinstance(obj, Job):
if script:
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
obj.script + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
obj.script + '.e')
sc = os.path.basename(script)
obj.unset_attributes([ATTR_executable, ATTR_Arglist])
if ATTR_N not in obj.custom_attrs:
obj.attributes[ATTR_N] = sc
if ATTR_queue in obj.attributes:
destination = obj.attributes[ATTR_queue]
                    # queue must be removed otherwise it will cause the
                    # submit to fail silently
del obj.attributes[ATTR_queue]
else:
destination = None
if (ATTR_o not in obj.attributes or
ATTR_e not in obj.attributes):
fn = self.utils.random_str(
length=4, prefix='PtlPbsJob')
tmp = self.du.get_tempdir(self.hostname)
fn = os.path.join(tmp, fn)
if ATTR_o not in obj.attributes:
obj.attributes[ATTR_o] = (self.hostname + ':' +
fn + '.o')
if ATTR_e not in obj.attributes:
obj.attributes[ATTR_e] = (self.hostname + ':' +
fn + '.e')
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit(c, obj.attropl, script, destination,
extend)
elif isinstance(obj, Reservation):
if ATTR_resv_duration in obj.attributes:
# reserve_duration is not a valid attribute, the API call
# will get rejected if it is used
wlt = ATTR_l + '.walltime'
obj.attributes[wlt] = obj.attributes[ATTR_resv_duration]
del obj.attributes[ATTR_resv_duration]
obj.attropl = self.utils.dict_to_attropl(obj.attributes)
objid = pbs_submit_resv(c, obj.attropl, extend)
prefix = 'submit to ' + self.shortname + ' as '
if isinstance(obj, Job):
self.logit(prefix + '%s: ' % obj.username, JOB, obj.custom_attrs,
objid)
if obj.script_body:
self.logger.log(logging.INFOCLI, 'job script ' + script +
'\n---\n' + obj.script_body + '\n---')
if objid is not None:
self.jobs[objid] = obj
elif isinstance(obj, Reservation):
# Reservations without -I option return as 'R123 UNCONFIRMED'
# so split to get the R123 only
self.logit(prefix + '%s: ' % obj.username, RESV, obj.attributes,
objid)
if objid is not None:
objid = objid.split()[0]
self.reservations[objid] = obj
if objid is not None:
obj.server[self.hostname] = objid
else:
try:
os.chdir(cwd)
except OSError:
pass
raise PbsSubmitError(rc=rc, rv=None, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
try:
os.chdir(cwd)
except OSError:
pass
return objid
def deljob(self, id=None, extend=None, runas=None, wait=False,
logerr=True, attr_W=None):
"""
delete a single job or list of jobs specified by id
raises ``PbsDeljobError`` on error
:param id: The identifier(s) of the jobs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param wait: Set to True to wait for job(s) to no longer
be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
        :param attr_W: -W args to qdel (Only for cli mode)
        :type attr_W: str
:raises: PbsDeljobError
"""
prefix = 'delete job on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qdel')]
if extend is not None:
pcmd += self.utils.convert_to_cli(extend, op=IFL_DELETE,
hostname=self.hostname)
if attr_W is not None:
pcmd += ['-W']
if attr_W != PTL_NOARG:
pcmd += [attr_W]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('deljob', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_deljob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDeljobError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.jobs is not None:
for j in id:
if j in self.jobs:
if self.jobs[j].interactive_handle is not None:
self.jobs[j].interactive_handle.close()
del self.jobs[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(JOB, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delresv(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
        delete a single reservation or list of reservations specified
        by id; raises ``PbsDelresvError`` on error
        :param id: The identifier(s) of the reservations to delete
        :type id: str or list
        :param extend: Optional parameters to pass along to PBS
        :type extend: str or None
        :param runas: run as user
        :type runas: str or None
        :param wait: Set to True to wait for reservation(s) to no
                     longer be reported by PBS. False by default
        :type wait: bool
        :param logerr: Whether to log errors. Defaults to True.
        :type logerr: bool
        :raises: PbsDelresvError
"""
prefix = 'delete resv on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ', '.join(id)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'pbs_rdel')]
if id is not None:
pcmd += id
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
elif not self._is_local:
pcmd = ['PBS_SERVER=' + self.hostname] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, logerr=logerr,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('delresv', id, user=runas, extend=extend)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in id:
tmp_rc = pbs_delresv(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsDelresvError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if self.reservations is not None:
for j in id:
if j in self.reservations:
del self.reservations[j]
if c:
self._disconnect(c)
if wait:
for oid in id:
self.expect(RESV, 'queue', id=oid, op=UNSET, runas=runas,
level=logging.DEBUG)
return rc
def delete(self, id=None, extend=None, runas=None, wait=False,
logerr=True):
"""
        delete a single job or reservation, or a list of them,
        specified by id; raises ``PbsDeleteError`` on error
:param id: The identifier(s) of the jobs/resvs to delete
:type id: str or list
:param extend: Optional parameters to pass along to PBS
:type extend: str or none
:param runas: run as user
:type runas: str
:param wait: Set to True to wait for job(s)/resv(s) to
no longer be reported by PBS. False by default
:type wait: bool
:param logerr: Whether to log errors. Defaults to True.
:type logerr: bool
:raises: PbsDeleteError
"""
prefix = 'delete on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if id is not None:
if not isinstance(id, list):
id = id.split(',')
prefix += ','.join(id)
if extend is not None:
prefix += ' with ' + str(extend)
self.logger.info(prefix)
if not len(id) > 0:
return 0
obj_type = {}
for j in id:
if j[0] in ('R', 'S'):
obj_type[j] = RESV
try:
rc = self.delresv(j, extend, runas, logerr=logerr)
except PbsDelresvError, e:
rc = e.rc
msg = e.msg
rv = e.rv
else:
obj_type[j] = JOB
try:
rc = self.deljob(j, extend, runas, logerr=logerr)
except PbsDeljobError, e:
rc = e.rc
msg = e.msg
rv = e.rv
if rc != 0:
raise PbsDeleteError(rc=rc, rv=rv, msg=msg)
if wait:
for oid in id:
self.expect(obj_type[oid], 'queue', id=oid, op=UNSET,
runas=runas, level=logging.DEBUG)
return rc
def select(self, attrib=None, extend=None, runas=None, logerr=True):
"""
        Select jobs that match the attributes list, or all jobs if no
        attributes are given; raises ``PbsSelectError`` on error
:param attrib: A string, list, or dictionary of attributes
:type attrib: str or list or dictionary
:param extend: the extended attributes to pass to select
:type extend: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:returns: A list of job identifiers that match the
attributes specified
:raises: PbsSelectError
"""
prefix = "select on " + self.shortname
if runas is not None:
prefix += " as " + str(runas)
prefix += ": "
if attrib is None:
s = PTL_ALL
elif not isinstance(attrib, dict):
self.logger.error(prefix + "attributes must be a dictionary")
return
else:
s = str(attrib)
self.logger.info(prefix + s)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'],
'bin', 'qselect')]
cmd = self.utils.convert_to_cli(attrib, op=IFL_SELECT,
hostname=self.hostname)
if extend is not None:
pcmd += ['-' + extend]
if not self._is_local and ((attrib is None) or
(ATTR_queue not in attrib)):
pcmd += ['-q', '@' + self.hostname]
pcmd += cmd
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsSelectError(rc=self.last_rc, rv=False,
msg=self.geterrmsg())
jobs = ret['out']
            # the command returns an empty string when no jobs match; since
            # we expect valid job ids, reset jobs to an empty list
if len(jobs) == 1 and jobs[0] == '':
jobs = []
elif runas is not None:
jobs = self.pbs_api_as('select', user=runas, data=attrib,
extend=extend)
else:
attropl = self.utils.convert_to_attropl(attrib, op=EQ)
c = self._connect(self.hostname)
jobs = pbs_selectjob(c, attropl, extend)
err = self.geterrmsg()
if err:
raise PbsSelectError(rc=-1, rv=False, msg=err,
post=self._disconnect, conn=c)
self._disconnect(c)
return jobs
def selstat(self, select_list, rattrib, runas=None, extend=None):
"""
        stat and filter job attributes.
        :param select_list: The filter criteria
        :type select_list: List
:param rattrib: The attributes to query
:type rattrib: List
:param runas: run as user
:type runas: str or None
.. note:: No ``CLI`` counterpart for this call
"""
attrl = self.utils.convert_to_attrl(rattrib)
attropl = self.utils.convert_to_attropl(select_list)
c = self._connect(self.hostname)
bs = pbs_selstat(c, attropl, attrl, extend)
self._disconnect(c)
return bs
def manager(self, cmd, obj_type, attrib=None, id=None, extend=None,
expect=False, max_attempts=None, level=logging.INFO,
sudo=None, runas=None, logerr=True):
"""
        issue a management command to the server, e.g. to set an
        attribute
        Returns the return code of ``qmgr/pbs_manager()`` on
        success; if expect is set to True, the return value is
        that of the call to expect. Raises ``PbsManagerError`` on
        error
:param cmd: The command to issue,
``MGR_CMD_[SET,UNSET, LIST,...]`` see pbs_ifl.h
:type cmd: str
:param obj_type: The type of object to query, one of
the * objects
:param attrib: Attributes to operate on, can be a string, a
list,a dictionary
:type attrib: str or list or dictionary
:param id: The name or list of names of the object(s) to act
upon.
:type id: str or list
:param extend: Optional extension to the IFL call. see
pbs_ifl.h
:type extend: str or None
        :param expect: If set to True, query the server expecting
                       the value to be accurately reflected.
                       Defaults to False
:type expect: bool
:param max_attempts: Sets a maximum number of attempts to
call expect with.
:type max_attempts: int
:param level: logging level
:param sudo: If True, run the manager command as super user.
Defaults to None. Some attribute settings
should be run with sudo set to True, those are
acl_roots, job_sort_formula, hook operations,
no_sched_hook_event, in those cases, setting
sudo to False is only needed for testing
purposes
:type sudo: bool
:param runas: run as user
:type runas: str
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsManagerError
When expect is ``False``, return the value, ``0/!0``
returned by pbs_manager
When expect is ``True``, return the value, ``True/False``,
returned by expect
"""
if isinstance(id, str):
oid = id.split(',')
else:
oid = id
self.logit('manager on ' + self.shortname +
[' as ' + str(runas), ''][runas is None] + ': ' +
PBS_CMD_MAP[cmd] + ' ', obj_type, attrib, oid, level=level)
c = None # connection handle
if (self.get_op_mode() == PTL_CLI or
sudo is not None or
obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and ('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
execcmd = [PBS_CMD_MAP[cmd], PBS_OBJ_MAP[obj_type]]
if oid is not None:
if cmd == MGR_CMD_DELETE and obj_type == NODE and oid[0] == "":
oid[0] = "@default"
execcmd += [",".join(oid)]
if attrib is not None and cmd != MGR_CMD_LIST:
if cmd == MGR_CMD_IMPORT:
execcmd += [attrib['content-type'],
attrib['content-encoding'],
attrib['input-file']]
else:
if isinstance(attrib, (dict, OrderedDict)):
kvpairs = []
for k, v in attrib.items():
if isinstance(v, tuple):
if v[0] == INCR:
op = '+='
elif v[0] == DECR:
op = '-='
else:
msg = 'Invalid operation: %s' % (v[0])
raise PbsManagerError(rc=1, rv=False,
msg=msg)
v = v[1]
else:
op = '='
                            # wrap string array values in double quotes if
                            # not already quoted:
if isinstance(v, str) and ',' in v and v[0] != '"':
v = '"' + v + '"'
kvpairs += [str(k) + op + str(v)]
if kvpairs:
execcmd += [",".join(kvpairs)]
del kvpairs
elif isinstance(attrib, list):
execcmd += [",".join(attrib)]
elif isinstance(attrib, str):
execcmd += [attrib]
if not self.default_pbs_conf or not self.default_client_pbs_conf:
as_script = True
else:
as_script = False
if not self._is_local or as_script:
execcmd = '\'' + " ".join(execcmd) + '\''
else:
execcmd = " ".join(execcmd)
# Hooks can only be queried as a privileged user on the host where
# the server is running, care must be taken to use the appropriate
# path to qmgr and appropriate escaping sequences
# VERSION INFO: no_sched_hook_event introduced in 11.3.120 only
if sudo is None:
if (obj_type in (HOOK, PBS_HOOK) or
(attrib is not None and
('job_sort_formula' in attrib or
'acl_roots' in attrib or
'no_sched_hook_event' in attrib))):
sudo = True
else:
sudo = False
pcmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'bin', 'qmgr'),
'-c', execcmd]
if as_script:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
ret = self.du.run_cmd(self.hostname, pcmd, sudo=sudo, runas=runas,
level=logging.INFOCLI, as_script=as_script,
logerr=logerr)
rc = ret['rc']
            # NOTE: work around the fact that qmgr overloads the return code:
            # in cases where the returned list is empty, an error flag is set
            # even though there is no error. Handled here by checking if there
            # is no err and no out message, in which case the return code is
            # reset to 0
if rc != 0 and (ret['out'] == [''] and ret['err'] == ['']):
rc = 0
if rc == 0:
if cmd == MGR_CMD_LIST:
bsl = self.utils.convert_to_dictlist(ret['out'], attrib,
mergelines=False)
self.update_attributes(obj_type, bsl)
else:
# Need to rework setting error, this is not thread safe
self.last_error = ret['err']
self.last_rc = ret['rc']
elif runas is not None:
_data = {'cmd': cmd, 'obj_type': obj_type, 'attrib': attrib,
'id': oid}
rc = self.pbs_api_as('manager', user=runas, data=_data,
extend=extend)
else:
a = self.utils.convert_to_attropl(attrib, cmd)
c = self._connect(self.hostname)
rc = 0
if obj_type == SERVER and oid is None:
oid = [self.hostname]
if oid is None:
# server will run strlen on id, it can not be NULL
oid = ['']
if cmd == MGR_CMD_LIST:
if oid is None:
bsl = self.status(obj_type, attrib, oid, extend)
else:
bsl = None
for i in oid:
tmpbsl = self.status(obj_type, attrib, i, extend)
if tmpbsl is None:
rc = 1
else:
if bsl is None:
bsl = tmpbsl
else:
bsl += tmpbsl
else:
rc = 0
if oid is None:
rc = pbs_manager(c, cmd, obj_type, i, a, extend)
else:
for i in oid:
tmprc = pbs_manager(c, cmd, obj_type, i, a, extend)
if tmprc != 0:
rc = tmprc
break
if rc == 0:
rc = tmprc
if cmd == MGR_CMD_DELETE and oid is not None:
for i in oid:
if obj_type == MGR_OBJ_HOOK and i in self.hooks:
del self.hooks[i]
if obj_type in (NODE, VNODE) and i in self.nodes:
del self.nodes[i]
if obj_type == MGR_OBJ_QUEUE and i in self.queues:
del self.queues[i]
if obj_type == MGR_OBJ_RSC and i in self.resources:
del self.resources[i]
if rc != 0:
raise PbsManagerError(rv=False, rc=rc, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c is not None:
self._disconnect(c)
if expect:
offset = None
if obj_type in (NODE, HOST):
obj_type = VNODE
if obj_type in (VNODE, QUEUE):
offset = 0.5
if cmd in PBS_CMD_TO_OP:
op = PBS_CMD_TO_OP[cmd]
else:
op = EQ
if oid is None:
return self.expect(obj_type, attrib, oid, op=op,
max_attempts=max_attempts, offset=offset)
for i in oid:
rc = self.expect(obj_type, attrib, i, op=op,
max_attempts=max_attempts, offset=offset)
if not rc:
break
return rc
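    # Illustrative usage sketch for manager() (not part of the original
    # documentation): attribute and queue names below are examples only and
    # assume a configured Server instance named `server`.
    #
    #   server.manager(MGR_CMD_SET, SERVER, {'scheduling': 'True'})
    #   server.manager(MGR_CMD_CREATE, QUEUE,
    #                  {'queue_type': 'execution', 'enabled': 'True',
    #                   'started': 'True'}, id='workq')
    #   server.manager(MGR_CMD_LIST, NODE)
    #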
def sigjob(self, jobid=None, signal=None, extend=None, runas=None,
logerr=True):
"""
Send a signal to a job. Raises ``PbsSignalError`` on error.
:param jobid: identifier of the job or list of jobs to send
the signal to
:type jobid: str or list
:param signal: The signal to send to the job, see pbs_ifl.h
:type signal: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsSignalError
"""
prefix = 'signal on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if signal is not None:
prefix += ' with signal = ' + str(signal)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qsig')]
if signal is not None:
pcmd += ['-s']
if signal != PTL_NOARG:
pcmd += [str(signal)]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('sigjob', jobid, runas, data=signal)
else:
c = self._connect(self.hostname)
rc = 0
for ajob in jobid:
tmp_rc = pbs_sigjob(c, ajob, signal, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsSignalError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
        if c:
            self._disconnect(c)
return rc
def msgjob(self, jobid=None, to_file=None, msg=None, extend=None,
runas=None, logerr=True):
"""
Send a message to a job. Raises ``PbsMessageError`` on
error.
:param jobid: identifier of the job or list of jobs to
send the message to
:type jobid: str or List
:param msg: The message to send to the job
:type msg: str or None
:param to_file: one of ``MSG_ERR`` or ``MSG_OUT`` or
``MSG_ERR|MSG_OUT``
:type to_file: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMessageError
"""
prefix = 'msgjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if to_file is not None:
prefix += ' with to_file = '
if MSG_ERR == to_file:
prefix += 'MSG_ERR'
elif MSG_OUT == to_file:
prefix += 'MSG_OUT'
elif MSG_OUT | MSG_ERR == to_file:
prefix += 'MSG_ERR|MSG_OUT'
else:
prefix += str(to_file)
if msg is not None:
prefix += ' msg = %s' % (str(msg))
if extend is not None:
prefix += ' extend = %s' % (str(extend))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmsg')]
if to_file is not None:
if MSG_ERR == to_file:
pcmd += ['-E']
elif MSG_OUT == to_file:
pcmd += ['-O']
elif MSG_OUT | MSG_ERR == to_file:
pcmd += ['-E', '-O']
else:
pcmd += ['-' + str(to_file)]
if msg is not None:
pcmd += [msg]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
data = {'msg': msg, 'to_file': to_file}
rc = self.pbs_api_as('msgjob', jobid, runas, data=data,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_msgjob(c, ajob, to_file, msg, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMessageError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def alterjob(self, jobid=None, attrib=None, extend=None, runas=None,
logerr=True):
"""
Alter attributes associated to a job. Raises
``PbsAlterError`` on error.
:param jobid: identifier of the job or list of jobs to
operate on
:type jobid: str or list
:param attrib: A dictionary of attributes to set
:type attrib: dictionary
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If False, CLI commands do not log error,
i.e. silent mode
:type logerr: bool
:raises: PbsAlterError
"""
prefix = 'alter on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if attrib is not None:
prefix += ' %s' % (str(attrib))
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qalter')]
if attrib is not None:
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(attrib, op=IFL_ALTER,
hostname=self.client,
dflt_conf=_conf)
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('alterjob', jobid, runas, data=attrib)
else:
c = self._connect(self.hostname)
if c < 0:
return c
a = self.utils.convert_to_attrl(attrib)
rc = 0
for ajob in jobid:
tmp_rc = pbs_alterjob(c, ajob, a, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsAlterError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def holdjob(self, jobid=None, holdtype=None, extend=None, runas=None,
logerr=True):
"""
Hold a job. Raises ``PbsHoldError`` on error.
:param jobid: identifier of the job or list of jobs to hold
:type jobid: str or list
:param holdtype: The type of hold to put on the job
:type holdtype: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsHoldError
"""
prefix = 'holdjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qhold')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('holdjob', jobid, runas, data=holdtype,
logerr=logerr)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_holdjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def rlsjob(self, jobid, holdtype, extend=None, runas=None, logerr=True):
"""
Release a job. Raises ``PbsReleaseError`` on error.
:param jobid: job or list of jobs to release
:type jobid: str or list
:param holdtype: The type of hold to release on the job
:type holdtype: str
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsReleaseError
"""
prefix = 'release on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if holdtype is not None:
prefix += ' with hold_list = %s' % (holdtype)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrls')]
if holdtype is not None:
pcmd += ['-h']
if holdtype != PTL_NOARG:
pcmd += [holdtype]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rlsjob', jobid, runas, data=holdtype)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rlsjob(c, ajob, holdtype, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsHoldError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
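    # Illustrative hold/release sketch (job id is hypothetical; hold type
    # 'u' is the user hold letter accepted by qhold/qrls):
    #
    #   server.holdjob('123.server1', 'u')
    #   server.rlsjob('123.server1', 'u')
    #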
def rerunjob(self, jobid=None, extend=None, runas=None, logerr=True):
"""
Rerun a job. Raises ``PbsRerunError`` on error.
        :param jobid: job or list of jobs to rerun
:type jobid: str or list
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsRerunError
"""
prefix = 'rerun on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if extend is not None:
prefix += extend
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qrerun')]
if extend:
pcmd += ['-W', extend]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('rerunjob', jobid, runas, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
tmp_rc = pbs_rerunjob(c, ajob, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRerunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def orderjob(self, jobid1=None, jobid2=None, extend=None, runas=None,
logerr=True):
"""
        reorder position of ``jobid1`` and ``jobid2``. Raises
        ``PbsOrderError`` on error.
:param jobid1: first jobid
:type jobid1: str or None
:param jobid2: second jobid
:type jobid2: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
        :raises: PbsOrderError
"""
prefix = 'orderjob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
prefix += str(jobid1) + ', ' + str(jobid2)
if extend is not None:
prefix += ' ' + str(extend)
self.logger.info(prefix)
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qorder')]
if jobid1 is not None:
pcmd += [jobid1]
if jobid2 is not None:
pcmd += [jobid2]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('orderjob', jobid1, runas, data=jobid2,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = pbs_orderjob(c, jobid1, jobid2, extend)
if rc != 0:
raise PbsOrderError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def runjob(self, jobid=None, location=None, async=False, extend=None,
runas=None, logerr=False):
"""
Run a job on given nodes. Raises ``PbsRunError`` on error.
:param jobid: job or list of jobs to run
:type jobid: str or list
:param location: An execvnode on which to run the job
:type location: str or None
:param async: If true the call will return immediately
assuming success.
:type async: bool
:param extend: extend options
:param runas: run as user
:type runas: str or None
        :param logerr: If True, logs run_cmd errors. Defaults to False
:type logerr: bool
:raises: PbsRunError
"""
if async:
prefix = 'Async run on ' + self.shortname
else:
prefix = 'run on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if location is not None:
prefix += ' with location = %s' % (location)
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qrun')]
if async:
pcmd += ['-a']
if location is not None:
pcmd += ['-H']
if location != PTL_NOARG:
pcmd += [location]
if jobid:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as(
'runjob', jobid, runas, data=location, extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
rc = 0
for ajob in jobid:
if async:
tmp_rc = pbs_asyrunjob(c, ajob, location, extend)
else:
tmp_rc = pbs_runjob(c, ajob, location, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsRunError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
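    # Illustrative runjob sketch (job ids and vnode name are hypothetical;
    # ``async`` is a valid keyword argument here because this module targets
    # Python 2):
    #
    #   server.runjob('123.server1', location='vnode1')
    #   server.runjob('124.server1', async=True)
    #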
def movejob(self, jobid=None, destination=None, extend=None, runas=None,
logerr=True):
"""
Move a job or list of job ids to a given destination queue.
Raises ``PbsMoveError`` on error.
:param jobid: A job or list of job ids to move
:type jobid: str or list
:param destination: The destination queue@server
:type destination: str or None
:param extend: extend options
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsMoveError
"""
prefix = 'movejob on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if jobid is not None:
if not isinstance(jobid, list):
jobid = jobid.split(',')
prefix += ', '.join(jobid)
if destination is not None:
prefix += ' destination = %s' % (destination)
self.logger.info(prefix)
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qmove')]
if destination is not None:
pcmd += [destination]
if jobid is not None:
pcmd += jobid
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
logerr=logerr, as_script=as_script,
level=logging.INFOCLI)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
rc = self.pbs_api_as('movejob', jobid, runas, data=destination,
extend=extend)
else:
c = self._connect(self.hostname)
if c < 0:
return c
for ajob in jobid:
tmp_rc = pbs_movejob(c, ajob, destination, extend)
if tmp_rc != 0:
rc = tmp_rc
if rc != 0:
raise PbsMoveError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c)
if c:
self._disconnect(c)
return rc
def qterm(self, manner=None, extend=None, server_name=None, runas=None,
logerr=True):
"""
Terminate the ``pbs_server`` daemon
        :param manner: one of ``(SHUT_IMMEDIATE | SHUT_DELAY | SHUT_QUICK)``
                       and can be combined with SHUT_WHO_SCHED, SHUT_WHO_MOM,
                       SHUT_WHO_SECDRY, SHUT_WHO_IDLESECDRY, SHUT_WHO_SECDONLY.
:param extend: extend options
:param server_name: name of the pbs server
:type server_name: str or None
:param runas: run as user
:type runas: str or None
:param logerr: If True (default) logs run_cmd errors
:type logerr: bool
:raises: PbsQtermError
"""
prefix = 'terminate ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': with manner '
attrs = manner
if attrs is None:
prefix += "None "
elif isinstance(attrs, str):
prefix += attrs
else:
if ((attrs & SHUT_QUICK) == SHUT_QUICK):
prefix += "quick "
if ((attrs & SHUT_IMMEDIATE) == SHUT_IMMEDIATE):
prefix += "immediate "
if ((attrs & SHUT_DELAY) == SHUT_DELAY):
prefix += "delay "
if ((attrs & SHUT_WHO_SCHED) == SHUT_WHO_SCHED):
                prefix += "scheduler "
if ((attrs & SHUT_WHO_MOM) == SHUT_WHO_MOM):
prefix += "mom "
if ((attrs & SHUT_WHO_SECDRY) == SHUT_WHO_SECDRY):
prefix += "secondary server "
if ((attrs & SHUT_WHO_IDLESECDRY) == SHUT_WHO_IDLESECDRY):
prefix += "idle secondary "
if ((attrs & SHUT_WHO_SECDONLY) == SHUT_WHO_SECDONLY):
                prefix += "shutdown secondary only "
self.logger.info(prefix)
if self.has_diag:
return 0
c = None
rc = 0
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin', 'qterm')]
_conf = self.default_client_pbs_conf
pcmd += self.utils.convert_to_cli(manner, op=IFL_TERMINATE,
hostname=self.hostname,
dflt_conf=_conf)
if server_name is not None:
pcmd += [server_name]
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
level=logging.INFOCLI, as_script=as_script)
rc = ret['rc']
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = rc
elif runas is not None:
attrs = {'manner': manner, 'server_name': server_name}
rc = self.pbs_api_as('terminate', None, runas, data=attrs,
extend=extend)
else:
if server_name is None:
server_name = self.hostname
c = self._connect(self.hostname)
rc = pbs_terminate(c, manner, extend)
if rc != 0:
raise PbsQtermError(rc=rc, rv=False, msg=self.geterrmsg(),
post=self._disconnect, conn=c, force=True)
if c:
self._disconnect(c, force=True)
return rc
teminate = qterm
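    # Illustrative qterm sketch: the manner flags documented above can be
    # OR'ed together, e.g. to quickly shut down the server together with the
    # scheduler (assumes a Server instance named `server`):
    #
    #   server.qterm(manner=SHUT_QUICK | SHUT_WHO_SCHED)
    #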
def geterrmsg(self):
"""
Get the error message
"""
mode = self.get_op_mode()
if mode == PTL_CLI:
return self.last_error
elif self._conn is not None and self._conn >= 0:
m = pbs_geterrmsg(self._conn)
if m is not None:
m = m.split('\n')
return m
#
# End IFL Wrappers
#
def qdisable(self, queue=None, runas=None, logerr=True):
"""
Disable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
disable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQdisableError
"""
prefix = 'qdisable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qdisable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQdisableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qdisable: currently not supported in API mode'
raise PbsQdisableError(rv=False, rc=1, msg=_msg)
def qenable(self, queue=None, runas=None, logerr=True):
"""
Enable queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to
enable
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQenableError
"""
prefix = 'qenable on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qenable')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQenableError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qenable: currently not supported in API mode'
raise PbsQenableError(rv=False, rc=1, msg=_msg)
def qstart(self, queue=None, runas=None, logerr=True):
"""
Start queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue
to start
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command
                       errors. Defaults to True.
:type logerr: bool
:raises: PbsQstartError
"""
prefix = 'qstart on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstart')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstartError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstart: currently not supported in API mode'
raise PbsQstartError(rv=False, rc=1, msg=_msg)
def qstop(self, queue=None, runas=None, logerr=True):
"""
Stop queue. ``CLI`` mode only
:param queue: The name of the queue or list of queue to stop
:type queue: str or list
:param runas: Optional name of user to run command as
:type runas: str or None
        :param logerr: Set to False to disable logging command errors.
Defaults to True.
:type logerr: bool
:raises: PbsQstopError
"""
prefix = 'qstop on ' + self.shortname
if runas is not None:
prefix += ' as ' + str(runas)
prefix += ': '
if queue is not None:
if not isinstance(queue, list):
queue = queue.split(',')
prefix += ', '.join(queue)
self.logger.info(prefix)
if self.get_op_mode() == PTL_CLI:
pcmd = [os.path.join(self.client_conf['PBS_EXEC'], 'bin',
'qstop')]
if queue is not None:
pcmd += queue
if not self.default_client_pbs_conf:
pcmd = ['PBS_CONF_FILE=' + self.client_pbs_conf_file] + pcmd
as_script = True
else:
as_script = False
ret = self.du.run_cmd(self.client, pcmd, runas=runas,
as_script=as_script, level=logging.INFOCLI,
logerr=logerr)
if ret['err'] != ['']:
self.last_error = ret['err']
self.last_rc = ret['rc']
if self.last_rc != 0:
raise PbsQstopError(rc=self.last_rc, rv=False,
msg=self.last_error)
else:
_msg = 'qstop: currently not supported in API mode'
raise PbsQstopError(rv=False, rc=1, msg=_msg)
def parse_resources(self):
"""
Parse server resources as defined in the resourcedef file
Populates instance variable self.resources
:returns: The resources as a dictionary
"""
if not self.has_diag:
self.manager(MGR_CMD_LIST, RSC)
return self.resources
def remove_resource(self, name):
"""
Remove an entry from resourcedef
:param name: The name of the resource to remove
:type name: str
"""
self.parse_resources()
if not self.has_diag:
if name in self.resources:
self.manager(MGR_CMD_DELETE, RSC, id=name)
def add_resource(self, name, type=None, flag=None):
"""
Define a server resource
:param name: The name of the resource to add to the
resourcedef file
:type name: str
:param type: The type of the resource, one of string,
long, boolean, float
:param flag: The target of the resource, one of n, h, q,
or none
:type flag: str or None
        :returns: True on success, False on error
"""
rv = self.parse_resources()
if rv is None:
return False
resource_exists = False
if name in self.resources:
msg = [self.logprefix + "resource " + name]
if type:
msg += ["type: " + type]
if flag:
msg += ["flag: " + flag]
msg += [" already defined"]
self.logger.info(" ".join(msg))
(t, f) = (self.resources[name].type, self.resources[name].flag)
if type == t and flag == f:
return True
self.logger.info("resource: redefining resource " + name +
" type: " + str(type) + " and flag: " + str(flag))
del self.resources[name]
resource_exists = True
r = Resource(name, type, flag)
self.resources[name] = r
a = {}
if type:
a['type'] = type
if flag:
a['flag'] = flag
if resource_exists:
self.manager(MGR_CMD_SET, RSC, a, id=name)
else:
self.manager(MGR_CMD_CREATE, RSC, a, id=name)
return True
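    # Illustrative custom resource sketch (resource name is hypothetical;
    # the type and flag values follow the add_resource docstring above):
    #
    #   server.add_resource('scratch_gb', type='long', flag='nh')
    #   server.remove_resource('scratch_gb')
    #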
def write_resourcedef(self, resources=None, filename=None, restart=True):
"""
Write into resource def file
:param resources: PBS resources
:type resources: dictionary
:param filename: resourcedef file name
:type filename: str or None
"""
if resources is None:
resources = self.resources
if isinstance(resources, Resource):
resources = {resources.name: resources}
fn = self.du.mkstemp()[1]
f = open(fn, 'w+')
for r in resources.values():
f.write(r.attributes['id'])
if r.attributes['type'] is not None:
f.write(' type=' + r.attributes['type'])
if r.attributes['flag'] is not None:
f.write(' flag=' + r.attributes['flag'])
f.write('\n')
f.close()
if filename is None:
dest = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
else:
dest = filename
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
if filename is None:
self.du.chown(self.hostname, path=dest, uid=0, gid=0,
sudo=True)
os.remove(fn)
if restart:
return self.restart()
return True
def parse_resourcedef(self, file=None):
"""
Parse an arbitrary resource definition file passed as
input and return a dictionary of resources
:param file: resource definition file
:type file: str or None
:returns: Dictionary of resource
:raises: PbsResourceError
"""
if file is None:
file = os.path.join(self.pbs_conf['PBS_HOME'], 'server_priv',
'resourcedef')
ret = self.du.cat(self.hostname, file, logerr=False, sudo=True)
if ret['rc'] != 0 or len(ret['out']) == 0:
# Most probable error is that file does not exist, we'll let it
# be created
return {}
resources = {}
lines = ret['out']
try:
for l in lines:
l = l.strip()
if l == '' or l.startswith('#'):
continue
name = None
rtype = None
flag = None
res = l.split()
e0 = res[0]
if len(res) > 1:
e1 = res[1].split('=')
else:
e1 = None
if len(res) > 2:
e2 = res[2].split('=')
else:
e2 = None
if e1 is not None and e1[0] == 'type':
rtype = e1[1]
elif e2 is not None and e2[0] == 'type':
rtype = e2[1]
if e1 is not None and e1[0] == 'flag':
                    flag = e1[1]
elif e2 is not None and e2[0] == 'flag':
flag = e2[1]
name = e0
r = Resource(name, rtype, flag)
resources[name] = r
except:
raise PbsResourceError(rc=1, rv=False,
msg="error in parse_resources")
return resources
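    # For reference, a resourcedef line as written by write_resourcedef() and
    # consumed by parse_resourcedef() above looks like (name is hypothetical):
    #
    #   scratch_gb type=long flag=nh
    #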
def pbs_api_as(self, cmd=None, obj=None, user=None, **kwargs):
"""
Generic handler to run an ``API`` call impersonating
a given user.This method is only used for impersonation
over the ``API`` because ``CLI`` impersonation takes place
through the generic ``DshUtils`` run_cmd mechanism.
:param cmd: PBS command
:type cmd: str or None
:param user: PBS user or current user
:type user: str or None
:raises: eval
"""
fn = None
objid = None
_data = None
if user is None:
user = self.du.get_current_user()
else:
# user may be a PbsUser object, cast it to string for the remainder
# of the function
user = str(user)
if cmd == 'submit':
if obj is None:
return None
_data = copy.copy(obj)
# the following attributes cause problems 'pickling',
# since they are not needed we unset them
_data.attrl = None
_data.attropl = None
_data.logger = None
_data.utils = None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob',
'select', 'delete', 'status', 'manager', 'terminate',
'deljob', 'delresv'):
objid = obj
if 'data' in kwargs:
_data = kwargs['data']
if _data is not None:
(fd, fn) = self.du.mkstemp()
tmpfile = open(fn, 'w+b')
cPickle.dump(_data, tmpfile)
tmpfile.close()
os.close(fd)
os.chmod(fn, 0755)
if self._is_local:
os.chdir(tempfile.gettempdir())
else:
self.du.run_copy(self.hostname, fn, fn, sudo=True)
if not self._is_local:
p_env = '"import os; print os.environ[\'PTL_EXEC\']"'
ret = self.du.run_cmd(self.hostname, ['python', '-c', p_env],
logerr=False)
if ret['out']:
runcmd = [os.path.join(ret['out'][0], 'pbs_as')]
else:
runcmd = ['pbs_as']
elif 'PTL_EXEC' in os.environ:
runcmd = [os.path.join(os.environ['PTL_EXEC'], 'pbs_as')]
else:
runcmd = ['pbs_as']
runcmd += ['-c', cmd, '-u', user]
if objid is not None:
runcmd += ['-o']
if isinstance(objid, list):
runcmd += [','.join(objid)]
else:
runcmd += [objid]
if fn is not None:
runcmd += ['-f', fn]
if 'hostname' in kwargs:
hostname = kwargs['hostname']
else:
hostname = self.hostname
runcmd += ['-s', hostname]
if 'extend' in kwargs and kwargs['extend'] is not None:
runcmd += ['-e', kwargs['extend']]
ret = self.du.run_cmd(self.hostname, runcmd, logerr=False, runas=user)
out = ret['out']
if ret['err']:
if cmd in CMD_ERROR_MAP:
m = CMD_ERROR_MAP[cmd]
if m in ret['err'][0]:
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
raise eval(str(ret['err'][0]))
self.logger.debug('err: ' + str(ret['err']))
if fn is not None:
os.remove(fn)
if not self._is_local:
self.du.rm(self.hostname, fn)
if cmd == 'submit':
if out:
return out[0].strip()
else:
return None
elif cmd in ('alterjob', 'holdjob', 'sigjob', 'msgjob', 'rlsjob',
'rerunjob', 'orderjob', 'runjob', 'movejob', 'delete',
'terminate'):
if ret['out']:
return int(ret['out'][0])
else:
return 1
elif cmd in ('manager', 'select', 'status'):
return eval(out[0])
def expect(self, obj_type, attrib=None, id=None, op=EQ, attrop=PTL_OR,
attempt=0, max_attempts=None, interval=None, count=None,
extend=None, offset=0, runas=None, level=logging.INFO,
msg=None):
"""
expect an attribute to match a given value as per an
operation.
:param obj_type: The type of object to query, JOB, SERVER,
                         SCHEDULER, QUEUE, NODE
:type obj_type: str
:param attrib: Attributes to query, can be a string, a list,
or a dict
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param op: An operation to perform on the queried data,
e.g., EQ, SET, LT,..
        :param attrop: Operation on multiple attributes, either
                       PTL_AND or PTL_OR. When PTL_AND is used, only
                       batch objects matching all attributes are
                       returned, otherwise an OR is applied
:param attempt: The number of times this function has been
called
:type attempt: int
        :param max_attempts: The maximum number of attempts to
                             perform. Defaults to the ptl_conf
                             ``expect_max_attempts`` setting
        :type max_attempts: int or None
        :param interval: The interval time between attempts. Defaults
                         to the ptl_conf ``expect_interval`` setting
:param count: If True, attrib will be accumulated using
function counter
:type count: bool
:param extend: passed to the stat call
:param offset: the time to wait before the initial check.
Defaults to 0.
:type offset: int
:param runas: query as a given user. Defaults to current
user
:type runas: str or None
:param msg: Message from last call of this function, this
message will be used while raising
PtlExpectError.
:type msg: str or None
:returns: True if attributes are as expected and False
otherwise
"""
if attempt == 0 and offset > 0:
self.logger.log(level, self.logprefix + 'expect offset set to ' +
str(offset))
time.sleep(offset)
if attrib is None:
attrib = {}
if ATTR_version in attrib and max_attempts is None:
max_attempts = 3
if max_attempts is None:
max_attempts = int(self.ptl_conf['expect_max_attempts'])
if interval is None:
interval = self.ptl_conf['expect_interval']
if attempt >= max_attempts:
_msg = "expected on " + self.logprefix + msg
raise PtlExpectError(rc=1, rv=False, msg=_msg)
if obj_type == SERVER and id is None:
id = self.hostname
if isinstance(attrib, str):
attrib = {attrib: ''}
elif isinstance(attrib, list):
d = {}
for l in attrib:
d[l] = ''
attrib = d
# Add check for substate=42 for jobstate=R, if not added explicitly.
if obj_type == JOB:
add_attribs = {'substate': False}
substate = False
for k, v in attrib.items():
if k == 'job_state' and ((isinstance(v, tuple) and
'R' in v[-1]) or v == 'R'):
add_attribs['substate'] = 42
elif k == 'job_state=R':
add_attribs['substate=42'] = v
elif 'substate' in k:
substate = True
if add_attribs['substate'] and not substate:
attrib['substate'] = add_attribs['substate']
attrop = PTL_AND
del add_attribs, substate
prefix = 'expect on ' + self.logprefix
msg = []
for k, v in attrib.items():
args = None
if isinstance(v, tuple):
operator = v[0]
if len(v) > 2:
args = v[2:]
val = v[1]
else:
operator = op
val = v
msg += [k, PTL_OP_TO_STR[operator].strip()]
if callable(val):
msg += ['callable(' + val.__name__ + ')']
if args is not None:
msg.extend(map(lambda x: str(x), args))
else:
msg += [str(val)]
msg += [PTL_ATTROP_TO_STR[attrop]]
# remove the last converted PTL_ATTROP_TO_STR
if len(msg) > 1:
msg = msg[:-1]
if len(attrib) == 0:
msg += [PTL_OP_TO_STR[op]]
msg += [PBS_OBJ_MAP[obj_type]]
if id is not None:
msg += [str(id)]
if attempt > 0:
msg += ['attempt:', str(attempt + 1)]
# Default count to True if the attribute contains an '=' in its name
# for example 'job_state=R' implies that a count of job_state is needed
if count is None and self.utils.operator_in_attribute(attrib):
count = True
if count:
newattr = self.utils.convert_attributes_by_op(attrib)
if len(newattr) == 0:
newattr = attrib
statlist = [self.counter(obj_type, newattr, id, extend, op=op,
attrop=attrop, level=logging.DEBUG,
runas=runas)]
else:
try:
statlist = self.status(obj_type, attrib, id=id,
level=logging.DEBUG, extend=extend,
runas=runas, logerr=False)
except PbsStatusError:
statlist = []
if (len(statlist) == 0 or statlist[0] is None or
len(statlist[0]) == 0):
if op == UNSET or list(set(attrib.values())) == [0]:
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
else:
time.sleep(interval)
msg = " no data for " + " ".join(msg)
self.logger.log(level, prefix + msg)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=msg)
if attrib is None:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop, attempt + 1,
max_attempts, interval, count, extend,
runas=runas, level=level, msg=" ".join(msg))
for k, v in attrib.items():
varargs = None
if isinstance(v, tuple):
op = v[0]
if len(v) > 2:
varargs = v[2:]
v = v[1]
for stat in statlist:
if k == ATTR_version and k in stat:
m = self.version_tag.match(stat[k])
if m:
stat[k] = m.group('version')
else:
time.sleep(interval)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval,
count, extend, runas=runas,
level=level, msg=" ".join(msg))
if k not in stat:
if op == UNSET:
continue
else:
# functions/methods are invoked and their return value
# used on expect
if callable(v):
if varargs is not None:
rv = v(stat[k], *varargs)
else:
rv = v(stat[k])
if isinstance(rv, bool):
if op == NOT:
if not rv:
continue
if rv:
continue
else:
v = rv
stat[k] = self.utils.decode_value(stat[k])
v = self.utils.decode_value(v)
if k == ATTR_version:
stat[k] = LooseVersion(str(stat[k]))
v = LooseVersion(str(v))
if op == EQ and stat[k] == v:
continue
elif op == SET and count and stat[k] == v:
continue
elif op == SET and count in (False, None):
continue
elif op == NE and stat[k] != v:
continue
elif op == LT:
if stat[k] < v:
continue
elif op == GT:
if stat[k] > v:
continue
elif op == LE:
if stat[k] <= v:
continue
elif op == GE:
if stat[k] >= v:
continue
elif op == MATCH_RE:
if re.search(str(v), str(stat[k])):
continue
elif op == MATCH:
if str(stat[k]).find(str(v)) != -1:
continue
if k in stat:
msg += [' got: ' + str(k) + ' = ' + str(stat[k])]
self.logger.info(prefix + " ".join(msg))
time.sleep(interval)
# run custom actions defined for this object type
if self.actions:
for act_obj in self.actions.get_actions_by_type(obj_type):
if act_obj.enabled:
act_obj.action(self, obj_type, attrib, id, op,
attrop)
return self.expect(obj_type, attrib, id, op, attrop,
attempt + 1, max_attempts, interval, count,
extend, level=level, msg=" ".join(msg))
self.logger.log(level, prefix + " ".join(msg) + ' ... OK')
return True
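    # Illustrative expect() sketch (job and vnode ids are hypothetical):
    # wait for a job to run, or for a vnode attribute to match a regular
    # expression with a custom retry policy.
    #
    #   server.expect(JOB, {'job_state': 'R'}, id='123.server1')
    #   server.expect(VNODE, {'state': (MATCH_RE, 'free|job-busy')},
    #                 id='vnode1', max_attempts=20, interval=2)
    #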
def is_history_enabled(self):
"""
Short-hand method to return the value of job_history_enable
"""
a = ATTR_JobHistoryEnable
attrs = self.status(SERVER, level=logging.DEBUG)[0]
if ((a in attrs.keys()) and attrs[a] == 'True'):
return True
return False
def cleanup_jobs(self, extend=None, runas=None):
"""
Helper function to delete all jobs.
By default this method will determine whether
job_history_enable is on and will cleanup all history
jobs. Specifying an extend parameter could override
this behavior.
:param runas: Clean the job as
:type runas: str or None
"""
delete_xt = 'force'
select_xt = None
if self.is_history_enabled():
delete_xt += 'deletehist'
select_xt = 'x'
job_ids = self.select(extend=select_xt)
if len(job_ids) > 0:
try:
self.deljob(id=job_ids, extend=delete_xt, runas=runas,
wait=True)
except:
pass
rv = self.expect(JOB, {'job_state': 0}, count=True, op=SET)
if not rv:
return self.cleanup_jobs(extend=extend, runas=runas)
return rv
def cleanup_reservations(self, extend=None, runas=None):
"""
Helper function to delete all reservations
"""
reservations = self.status(RESV, level=logging.DEBUG)
while reservations is not None and len(reservations) != 0:
resvs = [r['id'] for r in reservations]
if len(resvs) > 0:
try:
self.delresv(resvs, logerr=False, runas=runas)
except:
pass
reservations = self.status(RESV, level=logging.DEBUG)
def cleanup_jobs_and_reservations(self, extend='forcedeletehist'):
"""
Helper function to delete all jobs and reservations
        :param extend: Optional extend parameter that is passed
                       to delete. It defaults to 'forcedeletehist',
                       which is used in qdel and pbs_deljob() to force
                       delete all jobs, including history jobs
        :type extend: str
"""
rv = self.cleanup_jobs(extend)
self.cleanup_reservations()
return rv
def update_attributes(self, obj_type, bs):
"""
Populate objects from batch status data
"""
if bs is None:
return
for binfo in bs:
if 'id' not in binfo:
continue
id = binfo['id']
obj = None
if obj_type == JOB:
if ATTR_owner in binfo:
user = binfo[ATTR_owner].split('@')[0]
else:
user = None
if id in self.jobs:
self.jobs[id].attributes.update(binfo)
if self.jobs[id].username != user:
self.jobs[id].username = user
else:
self.jobs[id] = Job(user, binfo)
obj = self.jobs[id]
elif obj_type in (VNODE, NODE):
if id in self.nodes:
self.nodes[id].attributes.update(binfo)
else:
self.nodes[id] = MoM(id, binfo, diagmap={NODE: None},
server=self)
obj = self.nodes[id]
elif obj_type == SERVER:
self.attributes.update(binfo)
obj = self
elif obj_type == QUEUE:
if id in self.queues:
self.queues[id].attributes.update(binfo)
else:
self.queues[id] = Queue(id, binfo, server=self)
obj = self.queues[id]
elif obj_type == RESV:
if id in self.reservations:
self.reservations[id].attributes.update(binfo)
else:
self.reservations[id] = Reservation(id, binfo)
obj = self.reservations[id]
elif obj_type == HOOK:
if id in self.hooks:
self.hooks[id].attributes.update(binfo)
else:
self.hooks[id] = Hook(id, binfo, server=self)
obj = self.hooks[id]
elif obj_type == SCHED:
if self.scheduler:
self.scheduler.attributes.update(binfo)
else:
if SCHED in self.diagmap:
diag = self.diag
diagmap = self.diagmap
else:
diag = None
diagmap = None
self.scheduler = Scheduler(server=self, diag=diag,
diagmap=diagmap)
self.scheduler.attributes.update(binfo)
obj = self.scheduler
elif obj_type == RSC:
if id in self.resources:
self.resources[id].attributes.update(binfo)
else:
rtype = None
rflag = None
if 'type' in binfo:
rtype = binfo['type']
if 'flag' in binfo:
rflag = binfo['flag']
self.resources[id] = Resource(id, rtype, rflag)
if obj is not None:
self.utils.update_attributes_list(obj)
obj.__dict__.update(binfo)
def counter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, level=logging.INFO,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
"""
Accumulate properties set on an object. For example, to
count number of free nodes:
``server.counter(VNODE,{'state':'free'})``
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param attrop: Operation on multiple attributes, either
PTL_AND, PTL_OR
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either a path
to file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('counter: ', obj_type, attrib, id, level=level)
return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
PTL_COUNTER, idonly, grandtotal, db_access,
runas=runas,
resolve_indirectness=resolve_indirectness)
def filter(self, obj_type=None, attrib=None, id=None, extend=None, op=None,
attrop=None, bslist=None, idonly=True, grandtotal=False,
db_access=None, runas=None, resolve_indirectness=False):
"""
Filter objects by properties. For example, to filter all
free nodes:``server.filter(VNODE,{'state':'free'})``
For each attribute queried, if idonly is True, a list of
matching object names is returned; if idonly is False, then
the value of each attribute queried is returned.
This is unlike Python's built-in 'filter' that returns a
subset of objects matching from a pool of objects. The
Python filtering mechanism remains very useful in some
situations and should be used programmatically to achieve
desired filtering goals that can not be met easily with
PTL's filter method.
:param obj_type: The type of object to query, one of the
* objects
:param attrib: Attributes to query, can be a string, a
list, a dictionary
:type attrib: str or list or dictionary
:param id: The id of the object to act upon
:param extend: The extended parameter to pass to the stat
call
:param op: The operation used to match attrib to what is
queried. SET or None
:type op: str or None
:param bslist: Optional, use a batch status dict list
instead of an obj_type
:type bslist: List or None
:param idonly: if true, return the name/id of the matching
objects
:type idonly: bool
:param db_access: credentials to access db, either path to
file or dictionary
:type db_access: str or dictionary
:param runas: run as user
:type runas: str or None
"""
self.logit('filter: ', obj_type, attrib, id)
return self._filter(obj_type, attrib, id, extend, op, attrop, bslist,
PTL_FILTER, idonly, db_access, runas=runas,
resolve_indirectness=resolve_indirectness)
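    # Illustrative counter()/filter() sketch: counter() returns per-attribute
    # counts while filter() returns matching object ids (vnode names below
    # are hypothetical).
    #
    #   server.counter(VNODE, {'state': 'free'})   # e.g. {'state=free': 4}
    #   server.filter(VNODE, {'state': 'free'})
    #   # e.g. {'state=free': ['vn[0]', 'vn[1]', 'vn[2]', 'vn[3]']}
    #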
def _filter(self, obj_type=None, attrib=None, id=None, extend=None,
op=None, attrop=None, bslist=None, mode=PTL_COUNTER,
idonly=True, grandtotal=False, db_access=None, runas=None,
resolve_indirectness=False):
if bslist is None:
try:
_a = resolve_indirectness
tmp_bsl = self.status(obj_type, attrib, id,
level=logging.DEBUG, extend=extend,
db_access=db_access, runas=runas,
resolve_indirectness=_a)
del _a
except PbsStatusError:
return None
bslist = self.utils.filter_batch_status(tmp_bsl, attrib)
del tmp_bsl
if bslist is None:
return None
if isinstance(attrib, str):
attrib = attrib.split(',')
total = {}
for bs in bslist:
if isinstance(attrib, list):
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
if attrop == PTL_AND:
match = True
for k in attrib:
if k not in bs:
match = False
if not match:
continue
for a in attrib:
if a in bs:
if op == SET:
k = a
else:
# Since this is a list of attributes, no operator
# was provided so we settle on "equal"
k = a + '=' + str(bs[a])
if mode == PTL_COUNTER:
amt = 1
if grandtotal:
amt = self.utils.decode_value(bs[a])
if not isinstance(amt, (int, float)):
amt = 1
if a in total:
total[a] += amt
else:
total[a] = amt
else:
if k in total:
total[k] += amt
else:
total[k] = amt
elif mode == PTL_FILTER:
if k in total:
if idonly:
total[k].append(bs['id'])
else:
total[k].append(bs)
else:
if idonly:
total[k] = [bs['id']]
else:
total[k] = [bs]
else:
self.logger.error("Unhandled mode " + str(mode))
return None
elif isinstance(attrib, dict):
tmptotal = {} # The running count that will be used for total
# when filtering on multiple values, ensure that they are
# all present on the object, otherwise skip
match = True
for k, v in attrib.items():
if k not in bs:
match = False
if attrop == PTL_AND:
break
else:
continue
amt = self.utils.decode_value(bs[k])
if isinstance(v, tuple):
op = v[0]
val = self.utils.decode_value(v[1])
elif op == SET:
val = None
pass
else:
op = EQ
val = self.utils.decode_value(v)
if ((op == LT and amt < val) or
(op == LE and amt <= val) or
(op == EQ and amt == val) or
(op == GE and amt >= val) or
(op == GT and amt > val) or
(op == NE and amt != val) or
(op == MATCH and str(amt).find(str(val)) != -1) or
(op == MATCH_RE and
re.search(str(val), str(amt))) or
(op == SET)):
# There is a match, proceed to track the attribute
self._filter_helper(bs, k, val, amt, op, mode,
tmptotal, idonly, grandtotal)
elif attrop == PTL_AND:
match = False
if mode == PTL_COUNTER:
# requesting specific key/value pairs should result
# in 0 available elements
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
break
elif mode == PTL_COUNTER:
tmptotal[str(k) + PTL_OP_TO_STR[op] + str(val)] = 0
if attrop != PTL_AND or (attrop == PTL_AND and match):
for k, v in tmptotal.items():
if k not in total:
total[k] = v
else:
total[k] += v
return total
def _filter_helper(self, bs, k, v, amt, op, mode, total, idonly,
grandtotal):
# default operation to '='
if op is None or op not in PTL_OP_TO_STR:
op = '='
op_str = PTL_OP_TO_STR[op]
if op == SET:
            # override PTL_OP_TO_STR for SET operations
op_str = ''
v = ''
ky = k + op_str + str(v)
if mode == PTL_COUNTER:
incr = 1
if grandtotal:
if not isinstance(amt, (int, float)):
incr = 1
else:
incr = amt
if ky in total:
total[ky] += incr
else:
total[ky] = incr
elif mode == PTL_FILTER:
if ky in total:
if idonly:
total[ky].append(bs['id'])
else:
total[ky].append(bs)
else:
if idonly:
total[ky] = [bs['id']]
else:
total[ky] = [bs]
def logit(self, msg, obj_type, attrib, id, level=logging.INFO):
"""
Generic logging routine for ``IFL`` commands
:param msg: The message to log
:type msg: str
:param obj_type: object type, i.e *
:param attrib: attributes to log
:param id: name of object to log
:type id: str or list
:param level: log level, defaults to ``INFO``
"""
s = []
if self.logger is not None:
if obj_type is None:
obj_type = MGR_OBJ_NONE
s = [msg + PBS_OBJ_MAP[obj_type]]
if id:
if isinstance(id, list):
s += [' ' + ",".join(id)]
else:
s += [' ' + str(id)]
if attrib:
s += [' ' + str(attrib)]
self.logger.log(level, "".join(s))
def equivalence_classes(self, obj_type=None, attrib={}, bslist=None,
op=RESOURCES_AVAILABLE, show_zero_resources=True,
db_access=None, resolve_indirectness=False):
"""
:param obj_type: PBS Object to query, one of *
:param attrib: attributes to build equivalence classes
out of.
:type attrib: dictionary
:param bslist: Optional, list of dictionary representation
of a batch status
:type bslist: List
        :param op: if set to RESOURCES_AVAILABLE, uses the dynamic
                   amount of resources available, i.e., available -
                   assigned, otherwise uses the static amount of
                   resources available
        :param db_access: set to either a file containing credentials
                          to DB access or a dictionary containing
                          ``{'dbname':...,'user':...,'port':...}``
        :type db_access: str or dictionary
"""
if attrib is None:
attrib = {}
if len(attrib) == 0 and obj_type is not None:
if obj_type in (VNODE, NODE):
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
elif obj_type == JOB:
attrib = ['Resource_List.select',
'queue', 'array_indices_submitted']
elif obj_type == RESV:
attrib = ['Resource_List.select']
else:
return {}
if bslist is None and obj_type is not None:
# To get the resources_assigned we must stat the entire object so
# bypass the specific attributes that would filter out assigned
if op == RESOURCES_AVAILABLE:
bslist = self.status(obj_type, None, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
else:
bslist = self.status(obj_type, attrib, level=logging.DEBUG,
db_access=db_access,
resolve_indirectness=resolve_indirectness)
if bslist is None or len(bslist) == 0:
return {}
# automatically convert an objectlist into a batch status dict list
# for ease of use.
if not isinstance(bslist[0], dict):
bslist = self.utils.objlist_to_dictlist(bslist)
if isinstance(attrib, str):
attrib = attrib.split(',')
self.logger.debug("building equivalence class")
equiv = {}
for bs in bslist:
cls = ()
skip_cls = False
# attrs will be part of the EquivClass object
attrs = {}
# Filter the batch attributes by the attribs requested
for a in attrib:
if a in bs:
amt = self.utils.decode_value(bs[a])
if a.startswith('resources_available.'):
val = a.replace('resources_available.', '')
if (op == RESOURCES_AVAILABLE and
'resources_assigned.' + val in bs):
amt = (int(amt) - int(self.utils.decode_value(
bs['resources_assigned.' + val])))
                            # A negative amt here is not a bug: it can happen
                            # when computing whats_available because the
                            # computation is subtractive. Resources are added
                            # back when jobs/reservations end, but we are only
                            # concerned with what is available now for a given
                            # duration, so a negative amount is clamped to 0.
if amt < 0:
amt = 0
# TODO: not a failproof way to catch a memory type
# but PbsTypeSize should return the right value if
# it fails to parse it as a valid memory value
if a.endswith('mem'):
try:
amt = PbsTypeSize().encode(amt)
except:
# we guessed the type incorrectly
pass
else:
val = a
if amt == 0 and not show_zero_resources:
skip_cls = True
break
# Build the key of the equivalence class
cls += (val + '=' + str(amt),)
attrs[val] = amt
# Now that we are done with this object, add it to an equiv class
if len(cls) > 0 and not skip_cls:
if cls in equiv:
equiv[cls].add_entity(bs['id'])
else:
equiv[cls] = EquivClass(cls, attrs, [bs['id']])
return equiv.values()
def show_equivalence_classes(self, eq=None, obj_type=None, attrib={},
bslist=None, op=RESOURCES_AVAILABLE,
show_zero_resources=True, db_access=None,
resolve_indirectness=False):
"""
helper function to show the equivalence classes
        :param eq: equivalence classes as computed by
                   equivalence_classes; see equivalence_classes
                   for a description of the remaining parameters
        :param db_access: set to either a file containing credentials
                          to DB access or a dictionary containing
                          ``{'dbname':...,'user':...,'port':...}``
        :type db_access: str or dictionary
"""
if eq is None:
equiv = self.equivalence_classes(obj_type, attrib, bslist, op,
show_zero_resources, db_access,
resolve_indirectness)
else:
equiv = eq
equiv = sorted(equiv, key=lambda e: len(e.entities))
for e in equiv:
# e.show()
print str(e)
def whats_available(self, attrib=None, jobs=None, resvs=None, nodes=None):
"""
Returns what's available as a list of node equivalence
classes listed by availability over time.
:param attrib: attributes to consider
:type attrib: List
:param jobs: jobs to consider, if None, jobs are queried
locally
:param resvs: reservations to consider, if None, they are
queried locally
:param nodes: nodes to consider, if None, they are queried
locally
"""
if attrib is None:
attrib = ['resources_available.ncpus',
'resources_available.mem', 'state']
if resvs is None:
self.status(RESV)
resvs = self.reservations
if jobs is None:
self.status(JOB)
jobs = self.jobs
if nodes is None:
self.status(NODE)
nodes = self.nodes
nodes_id = nodes.keys()
avail_nodes_by_time = {}
def alloc_resource(self, node, resources):
# helper function. Must work on a scratch copy of nodes otherwise
# resources_available will get corrupted
for rsc, value in resources.items():
if isinstance(value, int) or value.isdigit():
avail = node.attributes['resources_available.' + rsc]
nvalue = int(avail) - int(value)
node.attributes['resources_available.' + rsc] = nvalue
# Account for reservations
for resv in resvs.values():
resvnodes = resv.execvnode('resv_nodes')
if resvnodes:
starttime = self.utils.convert_stime_to_seconds(
resv.attributes['reserve_start'])
for node in resvnodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if tm < 0 or n not in nodes_id:
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if nodes[n].attributes['sharing'] in ('default_excl',
'force_excl'):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# go on to look at the calendar of scheduled jobs to run and set
# the node availability according to when the job is estimated to
# start on the node
for job in self.jobs.values():
if (job.attributes['job_state'] != 'R' and
'estimated.exec_vnode' in job.attributes):
estimatednodes = job.execvnode('estimated.exec_vnode')
if estimatednodes:
st = job.attributes['estimated.start_time']
# Tweak for nas format of estimated time that has
# num seconds from epoch followed by datetime
if st.split()[0].isdigit():
starttime = st.split()[0]
else:
starttime = self.utils.convert_stime_to_seconds(st)
for node in estimatednodes:
for n, resc in node.items():
tm = int(starttime) - int(self.ctime)
if (tm < 0 or n not in nodes_id or
nodes[n].state != 'free'):
continue
if tm not in avail_nodes_by_time:
avail_nodes_by_time[tm] = []
if (nodes[n].attributes['sharing'] in
('default_excl', 'force_excl')):
avail_nodes_by_time[tm].append(nodes[n])
try:
nodes_id.remove(n)
except:
pass
else:
ncopy = copy.copy(nodes[n])
ncopy.attributes = copy.deepcopy(
nodes[n].attributes)
avail_nodes_by_time[tm].append(ncopy)
self.alloc_resource(nodes[n], resc)
# remaining nodes are free "forever"
for node in nodes_id:
if self.nodes[node].state == 'free':
if 'infinity' not in avail_nodes_by_time:
avail_nodes_by_time['infinity'] = [nodes[node]]
else:
avail_nodes_by_time['infinity'].append(nodes[node])
# if there is a dedicated time, move the availability time up to
# that time as necessary
if self.scheduler:
scheduler = self.scheduler
else:
scheduler = Scheduler(server=self)
scheduler.parse_dedicated_time()
if scheduler.dedicated_time:
dedtime = scheduler.dedicated_time[0]['from'] - int(self.ctime)
if dedtime <= int(time.time()):
dedtime = None
else:
dedtime = None
# finally, build the equivalence classes off of the nodes availability
# over time
self.logger.debug("Building equivalence classes")
whazzup = {}
if 'state' in attrib:
attrib.remove('state')
for tm, nds in avail_nodes_by_time.items():
equiv = self.equivalence_classes(VNODE, attrib, bslist=nds,
show_zero_resources=False)
if dedtime and (tm > dedtime or tm == 'infinity'):
tm = dedtime
if tm != 'infinity':
tm = str(datetime.timedelta(seconds=int(tm)))
whazzup[tm] = equiv
return whazzup
def show_whats_available(self, wa=None, attrib=None, jobs=None,
resvs=None, nodes=None):
"""
helper function to show availability as computed by
whats_available
:param wa: a dictionary of available attributes. See
whats_available for a description of the remaining
parameters
:type wa: Dictionary
"""
if wa is None:
wa = self.whats_available(attrib, jobs, resvs, nodes)
if len(wa) > 0:
print "%24s\t%s" % ("Duration of availability", "Resources")
print "-------------------------\t----------"
swa = sorted(wa.items(), key=lambda x: x[0])
for (k, eq_classes) in swa:
for eq_cl in eq_classes:
print "%24s\t%s" % (str(k), str(eq_cl))
def utilization(self, resources=None, nodes=None, jobs=None, entity={}):
"""
Return utilization of consumable resources on a set of
nodes
:param nodes: A list of dictionary of nodes on which to
compute utilization. Defaults to nodes
resulting from a stat call to the current
server.
:type nodes: List
:param resources: comma-separated list of resources to
compute utilization on. The name of the
resource is for example, ncpus or mem
:type resources: List
:param entity: An optional dictionary of entities to
compute utilization of,
``e.g. {'user': u1, 'group': g1, 'project': p1}``
:type entity: Dictionary
The utilization is returned as a dictionary of percentage
utilization for each resource.
Non-consumable resources are silently ignored.
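Illustrative sketch only; ``server`` and the entity name are
hypothetical::

    # utilization as [assigned, available] per resource
    u = server.utilization(resources=['ncpus', 'mem'])
    # utilization attributable to one user's running jobs
    u_user = server.utilization(entity={'euser': 'pbsuser'})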
"""
if nodes is None:
nodes = self.status(NODE)
if jobs is None:
jobs = self.status(JOB)
if resources is None:
rescs = ['ncpus', 'mem']
else:
rescs = resources
utilization = {}
resavail = {}
resassigned = {}
usednodes = 0
totnodes = 0
nodes_set = set()
for res in rescs:
resavail[res] = 0
resassigned[res] = 0
# If an entity is specified utilization must be collected from the
# Jobs usage, otherwise we can get the information directly from
# the nodes.
if len(entity) > 0 and jobs is not None:
for job in jobs:
if 'job_state' in job and job['job_state'] != 'R':
continue
entity_match = True
for k, v in entity.items():
if k not in job or job[k] != v:
entity_match = False
break
if entity_match:
for res in rescs:
r = 'Resource_List.' + res
if r in job:
tmpr = int(self.utils.decode_value(job[r]))
resassigned[res] += tmpr
if 'exec_host' in job:
hosts = ResourceResv.get_hosts(job['exec_host'])
nodes_set |= set(hosts)
for node in nodes:
# skip nodes in non-schedulable state
nstate = node['state']
if ('down' in nstate or 'unavailable' in nstate or
'unknown' in nstate or 'Stale' in nstate):
continue
totnodes += 1
# If an entity utilization was requested, all used nodes were
# already filtered into the nodes_set specific to that entity, we
# simply add them up. If no entity was requested, it suffices to
# have the node have a jobs attribute to count it towards total
# used nodes
if len(entity) > 0:
if node['id'] in nodes_set:
usednodes += 1
elif 'jobs' in node:
usednodes += 1
for res in rescs:
avail = 'resources_available.' + res
if avail in node:
val = self.utils.decode_value(node[avail])
if isinstance(val, int):
resavail[res] += val
# When entity matching all resources assigned are
# accounted for by the job usage
if len(entity) == 0:
assigned = 'resources_assigned.' + res
if assigned in node:
val = self.utils.decode_value(node[assigned])
if isinstance(val, int):
resassigned[res] += val
for res in rescs:
if res in resavail:
if res in resassigned:
if resavail[res] > 0:
utilization[res] = [resassigned[res], resavail[res]]
# Only report nodes utilization if no specific resources were requested
if resources is None:
utilization['nodes'] = [usednodes, totnodes]
return utilization
def create_vnodes(self, name=None, attrib=None, num=1, mom=None,
additive=False, sharednode=True, restart=True,
delall=True, natvnode=None, usenatvnode=False,
attrfunc=None, fname=None, vnodes_per_host=1,
createnode=True, expect=True):
"""
helper function to create vnodes.
:param name: prefix name of the vnode(s) to create
:type name: str or None
:param attrib: attributes to assign to each node
:param num: the number of vnodes to create. Defaults to 1
:type num: int
:param mom: the MoM object on which the vnode definition is
to be inserted
:param additive: If True, vnodes are added to the existing
vnode defs. Defaults to False.
:type additive: bool
:param sharednode: If True, all vnodes will share the same
host. Defaults to True.
:type sharednode: bool
:param restart: If True the MoM will be restarted.
:type restart: bool
:param delall: If True delete all server nodes prior to
inserting vnodes
:type delall: bool
:param natvnode: name of the natural vnode, i.e., the node
name in qmgr -c "create node <name>"
:type natvnode: str or None
:param usenatvnode: count the natural vnode as an
allocatable node.
:type usenatvnode: bool
:param attrfunc: an attribute=value function generator,
see create_vnode_def
:param fname: optional name of the vnode def file
:type fname: str or None
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
:param createnode: whether to create the node via manage or
not. Defaults to True
:type createnode: bool
:param expect: whether to expect attributes to be set or
not. Defaults to True
:type expect: bool
:returns: True on success and False otherwise
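Illustrative sketch only; ``server`` is a Server instance and
``mom`` a MoM object, both assumed to exist::

    # create 4 single-cpu vnodes off of mom's natural host
    a = {'resources_available.ncpus': 1}
    server.create_vnodes(name='vn', attrib=a, num=4, mom=mom)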
"""
if mom is None or name is None or attrib is None:
self.logger.error("name, attributes, and mom object are required")
return False
if delall:
try:
rv = self.manager(MGR_CMD_DELETE, NODE, None, "")
if rv != 0:
return False
except PbsManagerError:
pass
if natvnode is None:
natvnode = mom.shortname
vdef = mom.create_vnode_def(name, attrib, num, sharednode,
usenatvnode=usenatvnode, attrfunc=attrfunc,
vnodes_per_host=vnodes_per_host)
mom.insert_vnode_def(vdef, fname=fname, additive=additive,
restart=restart)
if createnode:
try:
statm = self.status(NODE, id=natvnode)
except:
statm = []
if len(statm) >= 1:
_m = 'Mom %s already exists, not creating' % (natvnode)
self.logger.info(_m)
else:
if mom.pbs_conf and 'PBS_MOM_SERVICE_PORT' in mom.pbs_conf:
m_attr = {'port': mom.pbs_conf['PBS_MOM_SERVICE_PORT']}
else:
m_attr = None
self.manager(MGR_CMD_CREATE, NODE, m_attr, natvnode)
attrs = {}
# only expect if vnodes were added rather than the nat vnode modified
if expect and num > 0:
for k, v in attrib.items():
attrs[str(k) + '=' + str(self.utils.decode_value(v))] = num
attrs['state=free'] = num
rv = self.expect(VNODE, attrs, attrop=PTL_AND)
else:
rv = True
return rv
def create_moms(self, name=None, attrib=None, num=1, delall=True,
createnode=True, conf_prefix='pbs.conf_m',
home_prefix='pbs_m', momhosts=None, init_port=15011,
step_port=2):
"""
Create MoM configurations and optionally add them to the
server. Unique ``pbs.conf`` files are defined and created
on each host on which MoMs are to be created.
:param name: Optional prefix name of the nodes to create.
Defaults to the name of the MoM host.
:type name: str or None
:param attrib: Optional node attributes to assign to the
MoM.
:param num: Number of MoMs to create
:type num: int
:param delall: Whether to delete all nodes on the server.
Defaults to True.
:type delall: bool
:param createnode: Whether to create the nodes and add them
to the server. Defaults to True.
:type createnode: bool
:param conf_prefix: The prefix of the PBS conf file. Defaults
to pbs.conf_m
:type conf_prefix: str
:param home_prefix: The prefix of the PBS_HOME directory.
Defaults to pbs_m
:type home_prefix: str
:param momhosts: A list of hosts on which to deploy num
MoMs.
:type momhosts: List
:param init_port: The initial port number to start assigning
``PBS_MOM_SERVICE_PORT`` to.
Defaults to 15011.
:type init_port: int
:param step_port: The increments at which ports are
allocated. Defaults to 2.
:type step_port: int
.. note:: Since PBS requires that
PBS_MANAGER_SERVICE_PORT = PBS_MOM_SERVICE_PORT+1,
the step number must be greater than or equal to 2.
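Illustrative sketch only; hostnames and counts are made up::

    # deploy 3 additional MoM instances on host hostA
    server.create_moms(name='mom', num=3, momhosts=['hostA'])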
"""
if not self.isUp():
logging.error("An up and running PBS server on " + self.hostname +
" is required")
return False
if delall:
try:
rc = self.manager(MGR_CMD_DELETE, NODE, None, "")
except PbsManagerError, e:
rc = e.rc
if rc:
if len(self.status(NODE)) > 0:
self.logger.error("create_moms: Error deleting all nodes")
return False
pi = PBSInitServices()
if momhosts is None:
momhosts = [self.hostname]
if attrib is None:
attrib = {}
error = False
for hostname in momhosts:
_pconf = self.du.parse_pbs_config(hostname)
if 'PBS_HOME' in _pconf:
_hp = _pconf['PBS_HOME']
if _hp.endswith('/'):
_hp = _hp[:-1]
_hp = os.path.dirname(_hp)
else:
_hp = '/var/spool'
_np_conf = _pconf
_np_conf['PBS_START_SERVER'] = '0'
_np_conf['PBS_START_SCHED'] = '0'
_np_conf['PBS_START_MOM'] = '1'
for i in xrange(0, num * step_port, step_port):
_np = os.path.join(_hp, home_prefix + str(i))
_n_pbsconf = os.path.join('/etc', conf_prefix + str(i))
_np_conf['PBS_HOME'] = _np
port = init_port + i
_np_conf['PBS_MOM_SERVICE_PORT'] = str(port)
_np_conf['PBS_MANAGER_SERVICE_PORT'] = str(port + 1)
self.du.set_pbs_config(hostname, fout=_n_pbsconf,
confs=_np_conf)
pi.initd(hostname, conf_file=_n_pbsconf, op='start')
m = MoM(hostname, pbsconf_file=_n_pbsconf)
if m.isUp():
m.stop()
if hostname != self.hostname:
m.add_config({'$clienthost': self.hostname})
try:
m.start()
except PbsServiceError:
# The service failed to start
self.logger.error("Service failed to start using port " +
str(port) + "...skipping")
self.du.rm(hostname, _n_pbsconf)
continue
if createnode:
attrib['Mom'] = hostname
attrib['port'] = port
if name is None:
name = hostname.split('.')[0]
_n = name + '-' + str(i)
rc = self.manager(MGR_CMD_CREATE, NODE, attrib, id=_n)
if rc != 0:
self.logger.error("error creating node " + _n)
error = True
if error:
return False
return True
def create_hook(self, name, attrs):
"""
Helper function to create a hook by name.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
:type attrs: dictionary
:returns: False if the hook already exists, True otherwise
:raises: PbsManagerError
"""
hooks = self.status(HOOK)
if ((hooks is None or len(hooks) == 0) or
(name not in map(lambda x: x['id'], hooks))):
self.manager(MGR_CMD_CREATE, HOOK, None, name)
else:
self.logger.error('hook named ' + name + ' exists')
return False
self.manager(MGR_CMD_SET, HOOK, attrs, id=name, expect=True)
return True
def import_hook(self, name, body):
"""
Helper function to import hook body into hook by name.
The hook must have been created prior to calling this
function.
:param name: The name of the hook to import body to
:type name: str
:param body: The body of the hook as a string.
:type body: str
:returns: True on success.
:raises: PbsManagerError
"""
(fd, fn) = self.du.mkstemp()
os.write(fd, body)
os.close(fd)
if not self._is_local:
tmpdir = self.du.get_tempdir(self.hostname)
rfile = os.path.join(tmpdir, os.path.basename(fn))
self.du.run_copy(self.hostname, fn, rfile)
else:
rfile = fn
a = {'content-type': 'application/x-python',
'content-encoding': 'default',
'input-file': rfile}
self.manager(MGR_CMD_IMPORT, HOOK, a, name)
os.remove(rfile)
if not self._is_local:
self.du.rm(self.hostname, rfile)
self.logger.info('server ' + self.shortname +
': imported hook body\n---\n' + body + '---')
return True
def create_import_hook(self, name, attrs=None, body=None, overwrite=True):
"""
Helper function to create a hook, import content into it,
set the event and enable it.
:param name: The name of the hook to create
:type name: str
:param attrs: The attributes to create the hook with.
Event and Enabled are mandatory. No defaults.
:type attrs: dictionary
:param body: The hook body as a string
:type body: str
:param overwrite: If True, if a hook of the same name
already exists, bypass its creation.
Defaults to True
:returns: True on success and False otherwise
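Illustrative sketch only; the hook name and body are made up::

    a = {'event': 'queuejob', 'enabled': 'true'}
    body = "import pbs; pbs.logmsg(pbs.LOG_DEBUG, 'job queued')"
    server.create_import_hook('ptl_example_hook', attrs=a,
                              body=body)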
"""
if 'event' not in attrs:
self.logger.error('attrs must specify at least an event and key')
return False
hook_exists = False
hooks = self.status(HOOK)
for h in hooks:
if h['id'] == name:
hook_exists = True
if not hook_exists or not overwrite:
rv = self.create_hook(name, attrs)
if not rv:
return False
else:
if attrs is None:
attrs = {'enabled': 'true'}
rc = self.manager(MGR_CMD_SET, HOOK, attrs, id=name)
if rc != 0:
return False
# In 12.0, a MoM hook must be enabled and its event set prior to
# importing, otherwise the MoM does not get the hook content
return self.import_hook(name, body)
def evaluate_formula(self, jobid=None, formula=None, full=True,
include_running_jobs=False, exclude_subjobs=True):
"""
Evaluate the job sort formula
:param jobid: If set, evaluate the formula for the given
jobid, if not set,formula is evaluated for
all jobs in state Q
:type jobid: str or None
:param formula: If set use the given formula. If not set,
the server's formula, if any, is used
:param full: If True, returns a dictionary of job
identifiers as keys and the evaluated formula
as values. Returns None if no formula is used.
Each job id formula is returned as a tuple
(s,e) where s is the formula expression
associated to the job and e is the evaluated
numeric value of that expression, for example,
if job_sort_formula is ncpus + mem
a job requesting 2 cpus and 100kb of memory
would return ('2 + 100', 102). If False, if
a jobid is specified, return the integer
value of the evaluated formula.
:type full: bool
:param include_running_jobs: If True, reports formula
value of running jobs.
Defaults to False.
:type include_running_jobs: bool
:param exclude_subjobs: If True, only report formula of
parent job array
:type exclude_subjobs: bool
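Illustrative sketch only; the formula and job id are made up::

    # with job_sort_formula set to "ncpus + mem" on the server
    ret = server.evaluate_formula()
    # ret maps job ids to ('<expression>', <numeric value>)
    val = server.evaluate_formula(jobid='123.server', full=False)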
"""
_f_builtins = ['queue_priority', 'job_priority', 'eligible_time',
'fair_share_perc']
if formula is None:
d = self.status(SERVER, 'job_sort_formula')
if len(d) > 0 and 'job_sort_formula' in d[0]:
formula = d[0]['job_sort_formula']
else:
return None
template_formula = self.utils._make_template_formula(formula)
# to split up the formula into keywords, first convert all possible
# operators into spaces and split the string.
# TODO: The list of operators may need to be expanded
T = string.maketrans('()%+*/-', ' ' * 7)
fres = string.translate(formula, T).split()
if jobid:
d = self.status(JOB, id=jobid, extend='t')
else:
d = self.status(JOB, extend='t')
ret = {}
for job in d:
if not include_running_jobs and job['job_state'] != 'Q':
continue
f_value = {}
# initialize the formula values to 0
for res in fres:
f_value[res] = 0
if 'queue_priority' in fres:
queue = self.status(JOB, 'queue', id=job['id'])[0]['queue']
d = self.status(QUEUE, 'Priority', id=queue)
if d and 'Priority' in d[0]:
qprio = int(d[0]['Priority'])
f_value['queue_priority'] = qprio
else:
continue
if 'job_priority' in fres:
if 'Priority' in job:
jprio = int(job['Priority'])
f_value['job_priority'] = jprio
else:
continue
if 'eligible_time' in fres:
if 'eligible_time' in job:
f_value['eligible_time'] = self.utils.convert_duration(
job['eligible_time'])
if 'fair_share_perc' in fres:
if self.scheduler is None:
self.scheduler = Scheduler(server=self)
if 'fairshare_entity' in self.scheduler.sched_config:
entity = self.scheduler.sched_config['fairshare_entity']
else:
self.logger.error(self.logprefix +
' no fairshare entity in sched config')
continue
if entity not in job:
self.logger.error(self.logprefix +
' job does not have property ' + entity)
continue
try:
fs_info = self.scheduler.query_fairshare(name=job[entity])
if fs_info is not None and 'TREEROOT' in fs_info.perc:
f_value['fair_share_perc'] = \
(fs_info.perc['TREEROOT'] / 100)
except PbsFairshareError:
f_value['fair_share_perc'] = 0
for job_res, val in job.items():
val = self.utils.decode_value(val)
if job_res.startswith('Resource_List.'):
job_res = job_res.replace('Resource_List.', '')
if job_res in fres and job_res not in _f_builtins:
f_value[job_res] = val
tf = string.Template(template_formula)
tfstr = tf.safe_substitute(f_value)
if (jobid is not None or not exclude_subjobs or
(exclude_subjobs and not self.utils.is_subjob(job['id']))):
ret[job['id']] = (tfstr, eval(tfstr))
if not full and jobid is not None and jobid in ret:
return ret[job['id']][1]
return ret
def _parse_limits(self, container=None, dictlist=None, id=None,
db_access=None):
"""
Helper function to parse limits syntax on a given
container.
:param container: The PBS object to query, one of ``QUEUE``
or ``SERVER``. Metascheduling node group
limits are not yet queryable
:type container: str or None
:param dictlist: A list of dictionaries off of a batch
status
:type dictlist: List
:param id: Optional id of the object to query
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if container is None:
self.logger.error('parse_limits expect container to be set')
return {}
if dictlist is None:
d = self.status(container, db_access=db_access)
else:
d = dictlist
if not d:
return {}
limits = {}
for obj in d:
# filter the id here instead of during the stat call so that
# we can call a full stat once rather than one stat per object
if id is not None and obj['id'] != id:
continue
for k, v in obj.items():
if k.startswith('max_run'):
v = v.split(',')
for rval in v:
rval = rval.strip("'")
l = self.utils.parse_fgc_limit(k + '=' + rval)
if l is None:
self.logger.error("Couldn't parse limit: " +
k + str(rval))
continue
(lim_type, resource, etype, ename, value) = l
if (etype, ename) not in self.entities:
entity = Entity(etype, ename)
self.entities[(etype, ename)] = entity
else:
entity = self.entities[(etype, ename)]
lim = Limit(lim_type, resource, entity, value,
container, obj['id'])
if container in limits:
limits[container].append(lim)
else:
limits[container] = [lim]
entity.set_limit(lim)
return limits
def parse_server_limits(self, server=None, db_access=None):
"""
Parse all server limits
:param server: list of dictionary of server data
:type server: List
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(SERVER, server, db_access=db_access)
def parse_queue_limits(self, queues=None, id=None, db_access=None):
"""
Parse queue limits
:param queues: list of dictionary of queue data
:type queues: List
:param id: The id of the queue to parse limit for. If None,
all queue limits are parsed
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
return self._parse_limits(QUEUE, queues, id=id, db_access=db_access)
def parse_all_limits(self, server=None, queues=None, db_access=None):
"""
Parse all server and queue limits
:param server: list of dictionary of server data
:type server: List
:param queues: list of dictionary of queue data
:type queues: List
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
"""
if hasattr(self, 'limits'):
del self.limits
slim = self.parse_server_limits(server, db_access=db_access)
qlim = self.parse_queue_limits(queues, id=None, db_access=db_access)
self.limits = dict(slim.items() + qlim.items())
del slim
del qlim
return self.limits
def limits_info(self, etype=None, ename=None, server=None, queues=None,
jobs=None, db_access=None, over=False):
"""
Collect limit information for each entity on which a
``server/queue`` limit is applied.
:param etype: entity type, one of u, g, p, o
:type etype: str or None
:param ename: entity name
:type ename: str or None
:param server: optional list of dictionary representation
of server object
:type server: List
:param queues: optional list of dictionary representation
of queues object
:type queues: List
:param jobs: optional list of dictionary representation of
jobs object
:type jobs: List
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
:param over: If True, show only entities that are over their
limit. Defaults to False.
:type over: bool
:returns: A list of dictionaries similar to that returned by
a converted batch_status object, i.e., one that can be
displayed using the Utils.show method
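Illustrative sketch only; the entity name is made up::

    linfo = server.limits_info(etype='u', ename='pbsuser')
    for entry in linfo:
        print entry['id'], entry['usage/limit'], entry['remainder']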
"""
def create_linfo(lim, entity_type, id, used):
"""
Create limit information
:param lim: Limit to apply
:param entity_type: Type of entity
"""
tmp = {}
tmp['id'] = entity_type + ':' + id
c = [PBS_OBJ_MAP[lim.container]]
if lim.container_id:
c += [':', lim.container_id]
tmp['container'] = "".join(c)
s = [str(lim.limit_type)]
if lim.resource:
s += ['.', lim.resource]
tmp['limit_type'] = "".join(s)
tmp['usage/limit'] = "".join([str(used), '/', str(lim.value)])
tmp['remainder'] = int(lim.value) - int(used)
return tmp
def calc_usage(jobs, attr, name=None, resource=None):
"""
Calculate the usage for the entity
:param attr: Job attribute
:param name: Entity name
:type name: str or None
:param resource: PBS resource
:type resource: str or None
:returns: The usage
"""
usage = {}
# initialize usage of the named entity
if name is not None and name not in ('PBS_GENERIC', 'PBS_ALL'):
usage[name] = 0
for j in jobs:
entity = j[attr]
if entity not in usage:
if resource:
usage[entity] = int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] = 1
else:
if resource:
usage[entity] += int(
self.utils.decode_value(
j['Resource_List.' + resource]))
else:
usage[entity] += 1
return usage
self.parse_all_limits(server, queues, db_access)
entities_p = self.entities.values()
linfo = []
cache = {}
if jobs is None:
jobs = self.status(JOB)
for entity in sorted(entities_p, key=lambda e: e.name):
for lim in entity.limits:
_t = entity.type
# skip non-matching entity types. We can't skip the entity
# name due to proper handling of the PBS_GENERIC limits
# we also can't skip overall limits
if (_t != 'o') and (etype is not None and etype != _t):
continue
_n = entity.name
a = {}
if lim.container == QUEUE and lim.container_id is not None:
a['queue'] = (EQ, lim.container_id)
if lim.resource:
resource = 'Resource_List.' + lim.resource
a[resource] = (GT, 0)
a['job_state'] = (EQ, 'R')
a['substate'] = (EQ, 42)
if etype == 'u' and ename is not None:
a['euser'] = (EQ, ename)
else:
a['euser'] = (SET, '')
if etype == 'g' and ename is not None:
a['egroup'] = (EQ, ename)
else:
a['egroup'] = (SET, '')
if etype == 'p' and ename is not None:
a['project'] = (EQ, ename)
else:
a['project'] = (SET, '')
# optimization: cache filtered results
d = None
for v in cache.keys():
if cmp(a, eval(v)) == 0:
d = cache[v]
break
if d is None:
d = self.filter(JOB, a, bslist=jobs, attrop=PTL_AND,
idonly=False, db_access=db_access)
cache[str(a)] = d
if not d or 'job_state=R' not in d:
# in the absence of jobs, display limits defined with usage
# of 0
if ename is not None:
_u = {ename: 0}
else:
_u = {_n: 0}
else:
if _t in ('u', 'o'):
_u = calc_usage(
d['job_state=R'], 'euser', _n, lim.resource)
# an overall limit applies across all running jobs
if _t == 'o':
all_used = sum(_u.values())
for k in _u.keys():
_u[k] = all_used
elif _t == 'g':
_u = calc_usage(
d['job_state=R'], 'egroup', _n, lim.resource)
elif _t == 'p':
_u = calc_usage(
d['job_state=R'], 'project', _n, lim.resource)
for k, used in _u.items():
if not over or (int(used) > int(lim.value)):
if ename is not None and k != ename:
continue
if _n in ('PBS_GENERIC', 'PBS_ALL'):
if k not in ('PBS_GENERIC', 'PBS_ALL'):
k += '/' + _n
elif _n != k:
continue
tmp_linfo = create_linfo(lim, _t, k, used)
linfo.append(tmp_linfo)
del a
del cache
return linfo
def __insert_jobs_in_db(self, jobs, hostname=None):
"""
An experimental interface that converts jobs from file
into entries in the PBS database that can be recovered
upon server restart if all other ``objects``, ``queues``,
``resources``, etc... are already defined.
The interface to PBS used in this method is incomplete
and will most likely cause serious issues. Use only for
development purposes
"""
if not jobs:
return []
if hostname is None:
hostname = socket.gethostname()
# a very crude, and not quite maintainable, way to get the flag value
# of an attribute. This is one of the reasons why this conversion
# of jobs is highly experimental
flag_map = {'ctime': 9, 'qtime': 9, 'hop_count': 9, 'queue_rank': 9,
'queue_type': 9, 'etime': 9, 'job_kill_delay': 9,
'run_version': 9, 'job_state': 9, 'exec_host': 9,
'exec_host2': 9, 'exec_vnode': 9, 'mtime': 9, 'stime': 9,
'substate': 9, 'hashname': 9, 'comment': 9, 'run_count': 9,
'schedselect': 13}
state_map = {'Q': 1, 'H': 2, 'W': 3, 'R': 4, 'E': 5, 'X': 6, 'B': 7}
job_attr_stmt = ("INSERT INTO pbs.job_attr (ji_jobid, attr_name, "
"attr_resource, attr_value, attr_flags)")
job_stmt = ("INSERT INTO pbs.job (ji_jobid, ji_sv_name, ji_state, "
"ji_substate,ji_svrflags, ji_numattr,"
" ji_ordering, ji_priority, ji_stime, ji_endtbdry, "
"ji_queue, ji_destin, ji_un_type, ji_momaddr, "
"ji_momport, ji_exitstat, ji_quetime, ji_rteretry, "
"ji_fromsock, ji_fromaddr, ji_4jid, ji_4ash, "
"ji_credtype, ji_qrank, ji_savetm, ji_creattm)")
all_stmts = []
for job in jobs:
keys = []
values = []
flags = []
for k, v in job.items():
if k in ('id', 'Mail_Points', 'Mail_Users'):
continue
keys.append(k)
if not v.isdigit():
values.append("'" + v + "'")
else:
values.append(v)
if k in flag_map:
flags.append(flag_map[k])
elif k.startswith('Resource_List'):
flags.append(15)
else:
flags.append(11)
jobid = job['id'].split('.')[0] + '.' + hostname
for i in range(len(keys)):
stmt = job_attr_stmt
stmt += " VALUES('" + jobid + "', "
if '.' in keys[i]:
k, v = keys[i].split('.')
stmt += "'" + k + "', '" + v + "'" + ", "
else:
stmt += "'" + keys[i] + "', ''" + ", "
stmt += values[i] + "," + str(flags[i])
stmt += ");"
self.logger.debug(stmt)
all_stmts.append(stmt)
js = job['job_state']
svrflags = 1
state = 1
if js in state_map:
state = state_map[js]
if state == 4:
# svrflags for other states aren't handled and will
# cause issues, another reason this is highly experimental
svrflags = 12289
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
stmt = job_stmt
stmt += " VALUES('" + jobid + "', 1, "
stmt += str(state) + ", " + job['substate']
stmt += ", " + str(svrflags)
stmt += ", 0, 0, 0"
if 'stime' in job:
print job['stime']
st = time.strptime(job['stime'], "%a %b %d %H:%M:%S %Y")
stmt += ", " + str(time.mktime(st))
else:
stmt += ", 0"
stmt += ", 0"
stmt += ", '" + job['queue'] + "'"
if 'exec_host2' in job:
stmt += ", " + job['exec_host2']
else:
stmt += ", ''"
stmt += ", 0, 0, 0, 0, 0, 0, 0, 0, '', '', 0, 0"
stmt += ", '" + tm + "', '" + tm + "');"
self.logger.debug(stmt)
all_stmts.append(stmt)
return all_stmts
def clusterize(self, conf_file=None, hosts=None, import_jobs=False,
db_creds_file=None):
"""
Mimic a ``pbs_diag`` snapshot onto a set of hosts running
a PBS ``server``,``scheduler``, and ``MoM``.
This method clones the following information from the diag:
``Server attributes``
``Server resourcedef``
``Hooks``
``Scheduler configuration``
``Scheduler resource_group``
``Scheduler holiday file``
``Per Queue attributes``
Nodes are copied as a vnode definition file inserted into
each host's MoM instance.
Currently no support for cloning the server 'sched' object,
nor to copy nodes to multi-mom instances.
Jobs are copied over only if import_jobs is True, see below
for details
The pbs_diag snapshot used is the one attached to this
Server instance; this method is a no-op without one.
:param conf_file: Configuration file for the MoM instance
:param hosts: List of hosts on which to clone the diag
snapshot
:type hosts: List
:param import_jobs: [Experimental] if True, jobs from the
pbs_diag are imported into the host's
database. There are several caveats to
this option:
The scripts are not imported.
The users and groups are not created on
the local system. There are no actual
processes created on the MoM for each
job so operations on the job such as
signals or delete will fail (delete -W
force will still work)
:type import_jobs: bool
:param db_creds_file: Path to file containing credentials
to access the DB
:type db_creds_file: str or None
"""
if not self.has_diag:
return
if hosts is None:
return
for h in hosts:
svr = Server(h)
sched = Scheduler(server=svr, diag=self.diag, diagmap=self.diagmap)
try:
svr.manager(MGR_CMD_DELETE, NODE, None, id="")
except:
pass
svr.revert_to_defaults(delqueues=True, delhooks=True)
local = svr.pbs_conf['PBS_HOME']
diag_rdef = os.path.join(self.diag, 'server_priv', 'resourcedef')
diag_sc = os.path.join(self.diag, 'sched_priv', 'sched_config')
diag_rg = os.path.join(self.diag, 'sched_priv', 'resource_group')
diag_hldy = os.path.join(self.diag, 'sched_priv', 'holidays')
nodes = os.path.join(self.diag, 'pbsnodes_va.out')
diag_hooks = os.path.join(self.diag, 'qmgr_ph.out')
diag_ps = os.path.join(self.diag, 'qmgr_ps.out')
local_rdef = os.path.join(local, 'server_priv', 'resourcedef')
local_sc = os.path.join(local, 'sched_priv', 'sched_config')
local_rg = os.path.join(local, 'sched_priv', 'resource_group')
local_hldy = os.path.join(local, 'sched_priv', 'holidays')
_fcopy = [(diag_rdef, local_rdef), (diag_sc, local_sc),
(diag_rg, local_rg), (diag_hldy, local_hldy)]
# Restart since resourcedef may have changed
svr.restart()
if os.path.isfile(diag_ps):
tmp_ps = open(diag_ps)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_ps, sudo=True, logerr=False)
tmp_ps.close()
# Unset any site-sensitive attributes
for a in ['pbs_license_info', 'manager', 'operators',
'mail_from', 'acl_roots', 'acl_hosts']:
try:
svr.manager(MGR_CMD_UNSET, SERVER, a, sudo=True)
except:
pass
for (d, l) in _fcopy:
if os.path.isfile(d):
self.logger.info('copying ' + d + ' to ' + l)
self.du.run_copy(h, src=d, dest=l, sudo=True)
diag_sched = self.status(SCHED)
for ds in diag_sched:
for k, v in ds.items():
if k != 'id':
try:
svr.manager(MGR_CMD_SET, SCHED, {k: v},
logerr=False)
except PbsManagerError:
self.logger.warning(
'Skipping sched attribute ' + k)
sched.signal('-HUP')
if os.path.isfile(nodes):
f = open(nodes)
lines = f.readlines()
f.close()
dl = self.utils.convert_to_dictlist(lines)
vdef = self.utils.dictlist_to_vnodedef(dl)
if vdef:
try:
svr.manager(MGR_CMD_DELETE, NODE, None, "")
except:
pass
MoM(h, pbsconf_file=conf_file).insert_vnode_def(vdef)
svr.restart()
svr.manager(MGR_CMD_CREATE, NODE, id=svr.shortname)
# check if any node is associated to a queue.
# This is needed because the queues 'hasnodes' attribute
# does not get set through vnode def update and must be set
# via qmgr. It only needs to be set once, not for each node
qtoset = {}
for n in dl:
if 'queue' in n and n['queue'] not in qtoset:
qtoset[n['queue']] = n['id']
# before setting queue on nodes make sure that the vnode
# def is all set
svr.expect(NODE, {'state=free': (GE, len(dl))}, interval=3)
for k, v in qtoset.items():
svr.manager(MGR_CMD_SET, NODE, {'queue': k}, id=v)
# populate hooks
if os.path.isfile(diag_hooks):
tmp_hook = open(diag_hooks)
cmd = [os.path.join(svr.pbs_conf['PBS_EXEC'], 'bin', 'qmgr')]
self.du.run_cmd(h, cmd, stdin=tmp_hook, sudo=True)
tmp_hook.close()
# import jobs
if import_jobs:
jobs = self.status(JOB)
sql_stmt = self.__insert_jobs_in_db(jobs, h)
print "\n".join(sql_stmt)
if db_creds_file is not None:
pass
class EquivClass(PBSObject):
"""
Equivalence class holds information on a collection of entities
grouped according to a set of attributes
:param attributes: Dictionary of attributes
:type attributes: Dictionary
:param entities: List of entities
:type entities: List
"""
def __init__(self, name, attributes={}, entities=[]):
self.name = name
self.attributes = attributes
self.entities = entities
self.logger = logging.getLogger(__name__)
def add_entity(self, entity):
"""
Add entities
:param entity: Entity to add
:type entity: str
"""
if entity not in self.entities:
self.entities.append(entity)
def __str__(self):
s = [str(len(self.entities)), ":", ":".join(self.name)]
return "".join(s)
def show(self, showobj=False):
"""
Show the entities
:param showobj: If true then show the entities
:type showobj: bool
"""
s = " && ".join(self.name) + ': '
if showobj:
s += str(self.entities)
else:
s += str(len(self.entities))
print s
return s
class Resource(PBSObject):
"""
PBS resource referenced by name, type and flag
:param name: Resource name
:type name: str or None
:param type: Type of resource
"""
def __init__(self, name=None, type=None, flag=None):
PBSObject.__init__(self, name)
self.set_name(name)
self.set_type(type)
self.set_flag(flag)
def set_name(self, name):
"""
Set the resource name
"""
self.name = name
self.attributes['id'] = name
def set_type(self, type):
"""
Set the resource type
"""
self.type = type
self.attributes['type'] = type
def set_flag(self, flag):
"""
Set the flag
"""
self.flag = flag
self.attributes['flag'] = flag
def __str__(self):
s = [self.attributes['id']]
if 'type' in self.attributes:
s.append('type=' + self.attributes['type'])
if 'flag' in self.attributes:
s.append('flag=' + self.attributes['flag'])
return " ".join(s)
class Holidays():
"""
Descriptive class for the holidays file.
"""
def __init__(self):
self.year = {'id': "YEAR", 'value': None, 'valid': False}
self.weekday = {'id': "weekday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.monday = {'id': "monday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.tuesday = {'id': "tuesday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.wednesday = {'id': "wednesday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.thursday = {'id': "thursday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.friday = {'id': "friday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.saturday = {'id': "saturday", 'p': None, 'np': None,
'valid': None, 'position': None}
self.sunday = {'id': "sunday", 'p': None, 'np': None, 'valid': None,
'position': None}
self.days_set = [] # list of set days
self._days_map = {'weekday': self.weekday, 'monday': self.monday,
'tuesday': self.tuesday, 'wednesday': self.wednesday,
'thursday': self.thursday, 'friday': self.friday,
'saturday': self.saturday, 'sunday': self.sunday}
self.holidays = [] # list of calendar holidays
def __str__(self):
"""
Return the content to write to holidays file as a string
"""
content = []
if self.year['valid']:
content.append(self.year['id'] + "\t" +
self.year['value'])
for i in range(0, len(self.days_set)):
content.append(self.days_set[i]['id'] + "\t" +
self.days_set[i]['p'] + "\t" +
self.days_set[i]['np'])
# Add calendar holidays
for day in self.holidays:
content.append(day)
return "\n".join(content)
class Scheduler(PBSService):
"""
Container of Scheduler related properties
:param hostname: The hostname on which the scheduler instance
is operating
:type hostname: str or None
:param server: A PBS server instance to which this scheduler
is associated
:param pbsconf_file: path to a PBS configuration file
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects (node,server,etc)
to mapped files from PBS diag directory
:type diagmap: Dictionary
:param diag: path to PBS diag directory (this overrides
diagmap)
:type diag: str or None
:param db_access: set to either file containing credentials
to DB access or dictionary containing
``{'dbname':...,'user':...,'port':...}``
:type db_access: str or dictionary
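Illustrative sketch only; ``server`` is assumed to be an
existing Server instance::

    sched = Scheduler(server=server)
    sched.set_sched_config({'backfill': 'false ALL'})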
"""
# A vanilla scheduler configuration. This set may change based on
# updates to PBS
sched_dflt_config = {
"backfill": "true ALL",
"backfill_prime": "false ALL",
"help_starving_jobs": "true ALL",
"max_starve": "24:00:00",
"strict_ordering": "false ALL",
"provision_policy": "\"aggressive_provision\"",
"preempt_order": "\"SCR\"",
"fairshare_entity": "euser",
"dedicated_prefix": "ded",
"primetime_prefix": "p_",
"nonprimetime_prefix": "np_",
"preempt_queue_prio": "150",
"preempt_prio": "\"express_queue, normal_jobs\"",
"load_balancing": "false ALL",
"prime_exempt_anytime_queues": "false",
"round_robin": "False all",
"fairshare_usage_res": "cput",
"smp_cluster_dist": "pack",
"fair_share": "false ALL",
"preempt_sort": "min_time_since_start",
"node_sort_key": "\"sort_priority HIGH\" ALL",
"sort_queues": "true ALL",
"by_queue": "True ALL",
"preemptive_sched": "true ALL",
"resources": "\"ncpus, mem, arch, host, vnode, aoe\"",
"log_filter": "3328 ",
}
sched_config_options = ["node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"sync_time",
"unknown_shares",
"log_filter",
"dedicated_prefix",
"load_balancing",
"help_starving_jobs",
"max_starve",
"sort_queues",
"backfill",
"primetime_prefix",
"nonprimetime_prefix",
"backfill_prime",
"prime_exempt_anytime_queues",
"prime_spill",
"prime_exempt_anytime_queues",
"prime_spill",
"resources",
"mom_resources",
"smp_cluster_dist",
"preempt_queue_prio",
"preempt_suspend",
"preempt_checkpoint",
"preempt_requeue",
"preemptive_sched",
"dont_preempt_starving",
"node_group_key",
"dont_preempt_starving",
"fairshare_enforce_no_shares",
"strict_ordering",
"resource_unset_infinite",
"provision_policy",
"resv_confirm_ignore",
"allow_aoe_calendar",
"max_job_check",
"preempt_attempts",
"update_comments",
"sort_by",
"key",
"preempt_starving",
"preempt_fairshare",
"load_balancing_rr",
"assign_ssinodes",
"cpus_per_ssinode",
"mem_per_ssinode",
"strict_fifo",
"mem_per_ssinode",
"strict_fifo"
]
fs_re = '(?P<name>[\S]+)[\s]*:[\s]*Grp:[\s]*(?P<Grp>[-]*[0-9]*)' + \
'[\s]*cgrp:[\s]*(?P<cgrp>[-]*[0-9]*)[\s]*' + \
'Shares:[\s]*(?P<Shares>[-]*[0-9]*)[\s]*Usage:[\s]*' + \
'(?P<Usage>[0-9]+)[\s]*Perc:[\s]*(?P<Perc>.*)%'
fs_tag = re.compile(fs_re)
def __init__(self, hostname=None, server=None, pbsconf_file=None,
diagmap={}, diag=None, db_access=None):
self.sched_config_file = None
self.dflt_holidays_file = None
self.holidays_file = None
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
self.dedicated_time_file = None
self.dedicated_time = None
self.dedicated_time_as_str = None
self.fairshare_tree = None
self.resource_group = None
self.server = None
self.server_dyn_res = None
self.deletable_files = ['usage']
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(hostname, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
if hostname is None:
hostname = self.server.hostname
self.server.scheduler = self
PBSService.__init__(self, hostname, pbsconf_file=pbsconf_file,
diag=diag, diagmap=diagmap)
_m = ['scheduler ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.pbs_conf = self.server.pbs_conf
self.sched_config_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'sched_config')
self.dflt_sched_config_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_sched_config')
self.parse_sched_config(self.sched_config_file)
self.dflt_holidays_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc', 'pbs_holidays')
self.holidays_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'holidays')
self.dflt_resource_group_file = os.path.join(self.pbs_conf['PBS_EXEC'],
'etc',
'pbs_resource_group')
self.resource_group_file = os.path.join(self.pbs_conf['PBS_HOME'],
'sched_priv', 'resource_group')
self.fairshare_tree = self.query_fairshare()
rg = self.parse_resource_group(self.hostname, self.resource_group_file)
self.resource_group = rg
try:
attrs = self.server.status(SCHED, level=logging.DEBUG,
db_access=db_access)
if attrs is not None and len(attrs) > 0:
self.attributes = attrs[0]
except (PbsManagerError, PbsStatusError), e:
self.logger.error('Error querying scheduler %s' % e.msg)
self.version = None
self.holidays_obj = Holidays()
self.holidays_parse_file(level=logging.DEBUG)
def isUp(self):
"""
Check for PBS scheduler up
"""
return super(Scheduler, self)._isUp(self)
def signal(self, sig):
"""
Send a signal to PBS scheduler
"""
self.logger.info('scheduler ' + self.shortname + ': sent signal ' +
sig)
return super(Scheduler, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS scheduler pid
"""
return super(Scheduler, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
Get the all pids for the instance
"""
return super(Scheduler, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the scheduler
:param args: Arguments required to start the scheduler
:type args: str
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list
"""
if args is not None or launcher is not None:
return super(Scheduler, self)._start(inst=self, args=args,
launcher=launcher)
else:
try:
rv = self.pi.start_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS scheduler
:param sig: Signal to stop the PBS scheduler
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping Scheduler on host ' +
self.hostname)
return super(Scheduler, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_sched()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS scheduler
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None, level=logging.INFO):
"""
Match the scheduler logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime,
level=level)
def pbs_version(self):
"""
Get the version of the scheduler instance
"""
if self.version:
return self.version
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1]
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def parse_sched_config(self, schd_cnfg=None):
"""
Parse a scheduling configuration file into a dictionary.
Special handling of identical keys ``(e.g., node_sort_key)``
is done by appending a delimiter, '%', between each value
of the key. When printed back to file, each delimited entry
gets written on a line of its own. For example, the python
dictionary entry:
``{'node_sort_key':
['"ncpus HIGH unused" prime', '"node_priority HIGH"
non-prime']}``
will get written as:
``node_sort_key: "ncpus HIGH unused" prime``
``node_sort_key: "node_priority HIGH" non-prime``
Returns sched_config dictionary that gets reinitialized
every time this method is called.
"""
# sched_config is initialized
if self.sched_config:
del(self.sched_config)
self.sched_config = {}
self._sched_config_comments = {}
self._config_order = []
if schd_cnfg is None:
if self.sched_config_file is not None:
schd_cnfg = self.sched_config_file
else:
self.logger.error('no scheduler configuration file to parse')
return False
try:
conf_opts = self.du.cat(self.hostname, schd_cnfg,
sudo=(not self.has_diag),
level=logging.DEBUG2)['out']
except:
self.logger.error('error parsing scheduler configuration')
return False
_comment = []
conf_re = re.compile(
'[#]?[\s]*(?P<conf_id>[\w]+):[\s]*(?P<conf_val>.*)')
for line in conf_opts:
m = conf_re.match(line)
if m:
key = m.group('conf_id')
val = m.group('conf_val')
# line is a comment, it could be a commented out scheduling
# option, or the description of an option. It could also be
# that part of the description is an example setting of the
# option.
# We must keep track of commented out options in order to
# rewrite the configuration in the same order as it was defined
if line.startswith('#'):
if key in self.sched_config_options:
_comment += [line]
if key in self._sched_config_comments:
self._sched_config_comments[key] += _comment
_comment = []
else:
self._sched_config_comments[key] = _comment
_comment = []
if key not in self._config_order:
self._config_order.append(key)
else:
_comment += [line]
continue
if key not in self._sched_config_comments:
self._sched_config_comments[key] = _comment
else:
self._sched_config_comments[key] += _comment
if key not in self._config_order:
self._config_order.append(key)
_comment = []
if key in self.sched_config:
if isinstance(self.sched_config[key], list):
if isinstance(val, list):
self.sched_config[key].extend(val)
else:
self.sched_config[key].append(val)
else:
if isinstance(val, list):
self.sched_config[key] = [self.sched_config[key]]
self.sched_config[key].extend(val)
else:
self.sched_config[key] = [self.sched_config[key],
val]
else:
self.sched_config[key] = val
else:
_comment += [line]
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL'] = _comment
return True
def check_defaults(self, config):
"""
Check the values in argument config against default values
"""
if len(config.keys()) == 0:
return
for k, v in self.sched_dflt_config.items():
if k in config:
s1 = v
s1 = s1.replace(" ", "")
s1 = s1.replace("\t", "").strip()
s2 = config[k]
s2 = s2.replace(" ", "")
s2 = s2.replace("\t", "").strip()
if s1 != s2:
self.logger.debug(k + ' non-default: ' + v +
' != ' + config[k])
def apply_config(self, config=None, validate=True, path=None):
"""
Apply the configuration specified by config
:param config: Configurations to set. Default: self.
sched_config
:param validate: If True (the default) validate that
settings did not yield an error.
Validation is done by parsing the
scheduler log which, in some cases may
be slow and therefore undesirable.
:type validate: bool
:param path: Optional path to file to which configuration
is written. If None, the configuration is
written to PBS_HOME/sched_priv/sched_config
:type path: str
:returns: True on success and False otherwise. Success
means that upon applying the new configuration
the scheduler did not emit an
"Error reading line" in its log file.
"""
if config is None:
config = self.sched_config
if len(config) == 0:
return True
reconfig_time = int(time.time())
try:
(fd, fn) = self.du.mkstemp()
for k in self._config_order:
if k in config:
if k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
v = config[k]
if isinstance(v, list):
for val in v:
os.write(fd, k + ": " + str(val) + "\n")
else:
os.write(fd, k + ": " + str(v) + "\n")
elif k in self._sched_config_comments:
os.write(fd, "\n".join(self._sched_config_comments[k]))
os.write(fd, "\n")
for k, v in self.sched_config.items():
if k not in self._config_order:
os.write(fd, k + ": " + str(v).strip() + "\n")
if 'PTL_SCHED_CONFIG_TAIL' in self._sched_config_comments:
os.write(fd, "\n".join(
self._sched_config_comments['PTL_SCHED_CONFIG_TAIL']))
os.write(fd, "\n")
os.close(fd)
if path is None:
sp = os.path.join(self.pbs_conf['PBS_HOME'], "sched_priv",
"sched_config")
if self.du.is_localhost(self.hostname):
self.du.run_copy(self.hostname, sp, sp + '.bak', sudo=True)
else:
cmd = ['mv', sp, sp + '.bak']
self.du.run_cmd(self.hostname, cmd, sudo=True)
else:
sp = path
self.du.run_copy(self.hostname, fn, sp, mode=0644, sudo=True)
os.remove(fn)
self.du.chown(self.hostname, path=sp, uid=0, gid=0, sudo=True)
self.logger.debug(self.logprefix + "updated configuration")
except:
m = self.logprefix + 'error in apply_config '
self.logger.error(m + str(traceback.print_exc()))
raise PbsSchedConfigError(rc=1, rv=False, msg=m)
if validate:
self.signal('-HUP')
m = self.log_match("Error reading line", n=10,
starttime=reconfig_time)
if m is None:
# no match, successful config
return True
raise PbsSchedConfigError(rc=1, rv=False, msg=str(m))
return True
def set_sched_config(self, confs={}, apply=True, validate=True):
"""
set a ``sched_config`` property
:param confs: dictionary of key value sched_config entries
:type confs: Dictionary
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
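Illustrative sketch only; values are examples and must be
strings formatted as they appear in sched_config::

    scheduler.set_sched_config({'strict_ordering': 'True ALL',
                                'backfill': 'false ALL'})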
"""
self.logger.info(self.logprefix + "config " + str(confs))
self.sched_config = dict(self.sched_config.items() + confs.items())
if apply:
try:
self.apply_config(validate=validate)
except PbsSchedConfigError:
for k in confs:
del self.sched_config[k]
self.apply_config(validate=validate)
return True
def add_server_dyn_res(self, custom_resource, script_body=None, file=None,
apply=True, validate=True):
"""
Add a server dynamic resource script or file to the scheduler
configuration
:param custom_resource: The name of the custom resource to
define
:type custom_resource: str
:param script_body: The body of the server dynamic resource
:param file: Alternatively to passing the script body, use
the file instead
:type file: str or None
:param apply: if True (the default), apply configuration.
:type apply: bool
:param validate: if True (the default), validate the
configuration settings.
:type validate: bool
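Illustrative sketch only; the resource name and script are
made up. The script's output becomes the value reported for
the resource at each scheduling cycle::

    # a one-line script that reports 10 units of resource foo;
    # an interpreter ('#!') line may be needed on some platforms
    scheduler.add_server_dyn_res('foo', script_body='echo 10')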
"""
if file is not None:
f = open(file)
script_body = f.readlines()
f.close()
else:
(fd, file) = self.du.mkstemp(prefix='PtlPbsSchedConfig')
f = open(file, "w")
f.write(script_body)
f.close()
os.close(fd)
self.server_dyn_res = file
self.logger.info(self.logprefix + "adding server dyn res " + file)
self.logger.info("-" * 30)
self.logger.info(script_body)
self.logger.info("-" * 30)
self.du.chmod(self.hostname, path=file, mode=0755)
a = {'server_dyn_res': '"' + custom_resource + ' !' + file + '"'}
self.set_sched_config(a, apply=apply, validate=validate)
def unset_sched_config(self, name, apply=True):
"""
Delete a ``sched_config`` entry
:param name: the entry to delete from sched_config
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
"""
self.parse_sched_config()
if name not in self.sched_config:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del self.sched_config[name]
if apply:
return self.apply_config()
def set_dedicated_time_file(self, file):
"""
Set the path to the dedicated time file
"""
self.logger.info(self.logprefix + " setting dedicated time file to " +
str(file))
self.dedicated_time_file = file
def revert_to_defaults(self):
"""
Revert scheduler configuration to defaults.
:returns: True on success, False otherwise
"""
self.logger.info(self.logprefix +
"reverting configuration to defaults")
self.server.manager(MGR_CMD_LIST, SCHED)
ignore_attrs = ['id', 'pbs_version', 'sched_host']
unsetattrs = []
for k in self.attributes.keys():
if k not in ignore_attrs:
unsetattrs.append(k)
if len(unsetattrs) > 0:
self.server.manager(MGR_CMD_UNSET, SCHED, unsetattrs)
self.clear_dedicated_time(hup=False)
if self.du.cmp(self.hostname, self.dflt_resource_group_file,
self.resource_group_file) != 0:
self.du.run_copy(self.hostname, self.dflt_resource_group_file,
self.resource_group_file, mode=0644, sudo=True)
if self.server_dyn_res is not None:
self.du.rm(self.hostname, self.server_dyn_res, force=True,
sudo=True)
self.server_dyn_res = None
rc = self.holidays_revert_to_default()
if self.du.cmp(self.hostname, self.dflt_sched_config_file,
self.sched_config_file) != 0:
self.du.run_copy(self.hostname, self.dflt_sched_config_file,
self.sched_config_file, mode=0644, sudo=True)
self.signal('-HUP')
for f in self.deletable_files:
fn = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv', f)
if fn is not None:
self.du.rm(self.hostname, fn, sudo=True, force=True)
self.parse_sched_config()
self.fairshare_tree = None
self.resource_group = None
return self.isUp()
def save_configuration(self, outfile, mode='a'):
"""
Save scheduler configuration
:param outfile: Path to a file to which configuration
is saved
:type outfile: str
:param mode: mode to use to access outfile. Defaults to
append, 'a'.
:type mode: str
:returns: True on success and False otherwise
"""
conf = {}
sconf = {MGR_OBJ_SCHED: conf}
sched_priv = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv')
sc = os.path.join(sched_priv, 'sched_config')
self._save_config_file(conf, sc)
rg = os.path.join(sched_priv, 'resource_group')
self._save_config_file(conf, rg)
dt = os.path.join(sched_priv, 'dedicated_time')
self._save_config_file(conf, dt)
hd = os.path.join(sched_priv, 'holidays')
self._save_config_file(conf, hd)
try:
f = open(outfile, mode)
cPickle.dump(sconf, f)
f.close()
except:
self.logger.error('error saving configuration ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_SCHED)
def get_resources(self, exclude=[]):
"""
returns a list of allocatable resources.
:param exclude: if set, excludes the named resources, if
they exist, from the resulting list
:type exclude: List
"""
if 'resources' not in self.sched_config:
return None
resources = self.sched_config['resources']
resources = resources.replace('"', '')
resources = resources.replace(' ', '')
res = resources.split(',')
if len(exclude) > 0:
for e in exclude:
if e in res:
res.remove(e)
return res
def add_resource(self, name, apply=True):
"""
Add a resource to ``sched_config``.
:param name: the resource name to add
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise.
Return True if the resource is already defined.
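Illustrative sketch only; the resource name is made up::

    # append a custom resource to the sched_config resources line
    scheduler.add_resource('foo')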
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name in splitres:
return True
resources = '"' + resources + ', ' + name + '"'
else:
resources = '"' + name + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
def remove_resource(self, name, apply=True):
"""
Remove a resource to ``sched_config``.
:param name: the resource name to remove
:type name: str
:param apply: if True, apply configuration. Defaults to True
:type apply: bool
:returns: True on success and False otherwise
"""
# if the sched_config has not been read in yet, parse it
if not self.sched_config:
self.parse_sched_config()
if 'resources' in self.sched_config:
resources = self.sched_config['resources']
resources = resources.replace('"', '')
splitres = [r.strip() for r in resources.split(",")]
if name not in splitres:
return True
newres = []
for r in splitres:
if r != name:
newres.append(r)
resources = '"' + ",".join(newres) + '"'
return self.set_sched_config({'resources': resources}, apply=apply)
def holidays_revert_to_default(self, level=logging.INFO):
"""
Revert holidays file to default
"""
self.logger.log(level, self.logprefix +
"reverting holidays file to default")
rc = None
# Copy over the holidays file from PBS_EXEC if it exists
if self.du.cmp(self.hostname, self.dflt_holidays_file,
self.holidays_file) != 0:
ret = self.du.run_copy(self.hostname, self.dflt_holidays_file,
self.holidays_file, mode=0644, sudo=True,
logerr=True)
rc = ret['rc']
# Update the internal data structures for the updated file
self.holidays_parse_file(level=level)
else:
rc = 1
return rc
def holidays_parse_file(self, path=None, obj=None, level=logging.INFO):
"""
Parse the existing holidays file
:param path: optional path to the holidays file to parse
:type path: str or None
:param obj: optional holidays object to be used instead
of internal
:returns: The content of holidays file as a list of lines
"""
self.logger.log(level, self.logprefix + "Parsing holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
if path is None:
path = self.holidays_file
lines = self.du.cat(self.hostname, path, sudo=True)['out']
content = [] # valid content to return
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
for line in lines:
entry = str(line).split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize year
content.append("\t".join(entry))
obj.year['valid'] = True
if len(entry) > 1:
obj.year['value'] = entry[1]
elif tag in days_map.keys(): # initialize a day
content.append("\t".join(entry))
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize a holiday
content.append("\t".join(entry))
obj.holidays.append(tag)
else:
pass
return content
def holidays_set_day(self, day_id, prime="", nonprime="", apply=True,
obj=None, level=logging.INFO):
"""
Set prime time values for a day
:param day_id: the day to be set (string)
:type day_id: str
:param prime: the prime time value
:param nonprime: the non-prime time value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The position ``(0-7)`` of the set day
"""
self.logger.log(level, self.logprefix +
"setting holidays file entry for %s",
day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
days_set = obj.days_set
if day['valid'] is None: # Fresh entry
days_set.append(day)
day['position'] = len(days_set) - 1
elif day['valid'] is False: # Previously invalidated entry
days_set.insert(day['position'], day)
else:
pass
day['valid'] = True
day['p'] = str(prime)
day['np'] = str(nonprime)
self.logger.debug("holidays_set_day(): changed day struct: " +
str(day))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return day['position']
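    # Illustrative sketch (assumed usage): mark Saturday as all prime time
    # and Sunday as all non-prime time, writing the holidays file only once.
    # The 'all'/'none' values follow the holidays file syntax; the method
    # itself does not validate them.
    #
    #     sched.holidays_set_day('saturday', prime='all', nonprime='none',
    #                            apply=False)
    #     sched.holidays_set_day('sunday', prime='none', nonprime='all',
    #                            apply=True)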
def holidays_get_day(self, day_id, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:param day_id: either a day's name or "all"
:type day_id: str
:returns: A copy of info about a day/all set days
"""
self.logger.log(level, self.logprefix +
"getting holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_set = obj.days_set
days_map = obj._days_map
if day_id == "all":
return days_set[:]
else:
return days_map[day_id].copy()
def holidays_reposition_day(self, day_id, new_pos, apply=True, obj=None,
level=logging.INFO):
"""
Change position of a day ``(0-7)`` as it appears in the
holidays file
:param day_id: name of the day
:type day_id: str
:param new_pos: new position
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
:returns: The new position of the day
"""
self.logger.log(level, self.logprefix +
"repositioning holidays file entry for " +
day_id + " to position " + str(new_pos))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if new_pos == day['position']:
return
# We also want to update order of invalid days, so add them to
# days_set temporarily
invalid_days = []
for name in days_map:
if days_map[name]['valid'] is False:
invalid_days.append(days_map[name])
days_set += invalid_days
# Sort the old list
days_set.sort(key=itemgetter('position'))
# Change position of 'day_id'
day['position'] = new_pos
days_set.remove(day)
days_set.insert(new_pos, day)
# Update the 'position' field
for i in range(0, len(days_set)):
days_set[i]['position'] = i
# Remove invalid days from days_set
len_days_set = len(days_set)
days_set = [days_set[i] for i in range(0, len_days_set)
if days_set[i] not in invalid_days]
self.logger.debug("holidays_reposition_day(): List of days after " +
" re-positioning " + str(day_id) + " is:\n" +
str(days_set))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return new_pos
def holidays_unset_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Unset prime time values for a day
:param day_id: day to unset (string)
:type day_id: str
:param apply: to reflect the changes to file
:param obj: optional holidays object to be used instead
of internal
.. note:: we do not unset the 'valid' field here so the entry
will still be displayed but without any values
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file entry for " + day_id)
if obj is None:
obj = self.holidays_obj
day = obj._days_map[str(day_id).lower()]
day['p'] = ""
day['np'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_invalidate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Remove a day's entry from the holidays file
:param day_id: the day to remove (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"invalidating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
day['valid'] = False
days_set.remove(day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_validate_day(self, day_id, apply=True, obj=None,
level=logging.INFO):
"""
Make valid a previously set day's entry
:param day_id: the day to validate (string)
:type day_id: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
.. note:: The day will retain its previous position in
the file
"""
self.logger.log(level, self.logprefix +
"validating holidays file entry for " +
day_id)
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
day = days_map[str(day_id).lower()]
if day in days_set: # do not insert a pre-existing day
self.logger.debug("holidays_validate_day(): " +
day_id + " is already valid!")
return
day['valid'] = True
days_set.insert(day['position'], day)
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_delete_entry(self, entry_type, idx=None, apply=True,
obj=None, level=logging.INFO):
"""
Delete ``one/all`` entries from holidays file
:param entry_type: 'y':year, 'd':day, 'h':holiday or 'a': all
:type entry_type: str
:param idx: either a day of week (monday, tuesday etc.)
or Julian date of a holiday
:type idx: str or None
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead of
internal
:returns: False if entry_type is invalid, otherwise True
        .. note:: The day cannot be validated and will lose its
                  position in the file
"""
self.logger.log(level, self.logprefix +
"Deleting entries from holidays file")
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
days_set = obj.days_set
holiday_list = obj.holidays
year = obj.year
if entry_type not in ['a', 'y', 'd', 'h']:
return False
if entry_type == 'y' or entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting year entry from holidays file")
# Delete year entry
year['value'] = None
year['valid'] = False
if entry_type == 'd' or entry_type == 'a':
# Delete one/all day entries
num_days_to_delete = 1
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all days from holidays file")
num_days_to_delete = len(days_set)
for i in range(0, num_days_to_delete):
if (entry_type == 'd'):
self.logger.debug(self.logprefix +
"deleting " + str(idx) +
" entry from holidays file")
day = days_map[str(idx).lower()]
else:
day = days_set[0]
day['p'] = None
day['np'] = None
day['valid'] = None
day['position'] = None
days_set.remove(day)
if entry_type == 'd':
# Correct 'position' field of every day
for i in range(0, len(days_set)):
days_set[i]['position'] = i
if entry_type == 'h' or entry_type == 'a':
# Delete one/all calendar holiday entries
if entry_type == 'a':
self.logger.debug(self.logprefix +
"deleting all holidays from holidays file")
del holiday_list[:]
else:
self.logger.debug(self.logprefix +
"deleting holiday on " + str(idx) +
" from holidays file")
holiday_list.remove(str(idx))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
return True
def holidays_set_year(self, new_year="", apply=True, obj=None,
level=logging.INFO):
"""
Set the year value
        :param new_year: year value to set
        :type new_year: str
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"setting holidays file year entry to " +
str(new_year))
if obj is None:
obj = self.holidays_obj
year = obj.year
year['value'] = str(new_year)
year['valid'] = True
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_unset_year(self, apply=True, obj=None, level=logging.INFO):
"""
Unset the year value
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"unsetting holidays file year entry")
if obj is None:
obj = self.holidays_obj
obj.year['value'] = ""
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_year(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The year entry of holidays file
"""
self.logger.log(level, self.logprefix +
"getting holidays file year entry")
if obj is None:
obj = self.holidays_obj
year = obj.year
return year.copy()
def holidays_add_holiday(self, date=None, apply=True, obj=None,
level=logging.INFO):
"""
Add a calendar holiday to the holidays file
:param date: Date value for the holiday
:param apply: to reflect the changes to file
:type apply: bool
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.log(level, self.logprefix +
"adding holiday " + str(date) +
" to holidays file")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
if date is not None:
holiday_list.append(str(date))
else:
pass
self.logger.debug("holidays list after adding one: " +
str(holiday_list))
if apply:
self.holidays_write_file(obj=obj, level=logging.DEBUG)
def holidays_get_holidays(self, obj=None, level=logging.INFO):
"""
:param obj: optional holidays object to be used instead
of internal
:returns: The list of holidays in holidays file
"""
self.logger.log(level, self.logprefix +
"retrieving list of holidays")
if obj is None:
obj = self.holidays_obj
holiday_list = obj.holidays
return holiday_list[:]
def _holidays_process_content(self, content, obj=None):
"""
Process a user provided list of holidays file content
:param obj: optional holidays object to be used instead
of internal
"""
self.logger.debug("_holidays_process_content(): " +
"Processing user provided holidays content:\n" +
str(content))
if obj is None:
obj = self.holidays_obj
days_map = obj._days_map
year = obj.year
holiday_list = obj.holidays
days_set = obj.days_set
self.holidays_delete_entry(
'a', apply=False, obj=obj, level=logging.DEBUG)
if content is None:
self.logger.debug("Holidays file was wiped out")
return
for line in content:
entry = line.split()
if len(entry) == 0:
continue
tag = entry[0].lower()
if tag == "year": # initialize self.year
year['valid'] = True
if len(entry) > 1:
year['value'] = entry[1]
elif tag in days_map.keys(): # initialize self.<day>
day = days_map[tag]
day['valid'] = True
days_set.append(day)
day['position'] = len(days_set) - 1
if len(entry) > 1:
day['p'] = entry[1]
if len(entry) > 2:
day['np'] = entry[2]
elif tag.isdigit(): # initialize self.holiday
holiday_list.append(tag)
else:
pass
def holidays_write_file(self, content=None, out_path=None,
hup=True, obj=None, level=logging.INFO):
"""
Write to the holidays file with content ``given/generated``
:param hup: SIGHUP the scheduler after writing the holidays
file
:type hup: bool
:param obj: optional holidays object to be used instead of
internal
"""
self.logger.log(level, self.logprefix +
"Writing to the holidays file")
if obj is None:
obj = self.holidays_obj
if out_path is None:
out_path = self.holidays_file
if content is not None:
self._holidays_process_content(content, obj)
else:
content = str(obj)
self.logger.debug("content being written:\n" + str(content))
(fd, fn) = self.du.mkstemp(self.hostname, body=content)
ret = self.du.run_copy(self.hostname, fn, out_path, mode=0644,
sudo=True)
self.du.rm(self.hostname, fn)
self.du.chown(self.hostname, out_path, uid=0, gid=0,
sudo=True)
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=ret['rc'], rv=ret['out'],
msg=('error applying holidays file' +
ret['err']))
if hup:
rv = self.signal('-HUP')
if not rv:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error applying holidays file')
self.du.chown(self.hostname, path=out_path, uid=0,
gid=0, sudo=True)
return True
def parse_dedicated_time(self, file=None):
"""
Parse the dedicated_time file and populate dedicated times
as both a string dedicated_time array of dictionaries defined
as ``[{'from': datetime, 'to': datetime}, ...]`` as well as a
dedicated_time_as_str array with a string representation of
each entry
:param file: optional file to parse. Defaults to the one under
``PBS_HOME/sched_priv``
:type file: str or None
        :returns: The dedicated_time list of dictionaries or None on
                  error. Returns an empty array if the dedicated time
                  file is empty.
"""
self.dedicated_time_as_str = []
self.dedicated_time = []
if file:
dt_file = file
elif self.dedicated_time_file:
dt_file = self.dedicated_time_file
else:
dt_file = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
try:
lines = self.du.cat(self.hostname, dt_file, sudo=True)['out']
if lines is None:
return []
for line in lines:
if not line.startswith('#') and len(line) > 0:
self.dedicated_time_as_str.append(line)
(dtime_from, dtime_to) = self.utils.convert_dedtime(line)
self.dedicated_time.append({'from': dtime_from,
'to': dtime_to})
except:
self.logger.error('error in parse_dedicated_time')
return None
return self.dedicated_time
def clear_dedicated_time(self, hup=True):
"""
Clear the dedicated time file
"""
self.parse_dedicated_time()
if ((len(self.dedicated_time) == 0) and
(len(self.dedicated_time_as_str) == 0)):
return True
if self.dedicated_time:
for d in self.dedicated_time:
del d
if self.dedicated_time_as_str:
for d in self.dedicated_time_as_str:
del d
self.dedicated_time = []
self.dedicated_time_as_str = []
dt = "# FORMAT: MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM"
return self.add_dedicated_time(dt, hup=hup)
def add_dedicated_time(self, as_str=None, start=None, end=None, hup=True):
"""
Append a dedicated time entry. The function can be called
in one of two ways, either by passing in start and end as
time values, or by passing as_str, a string that gets
appended to the dedicated time entries and formatted as
        follows. Note that no check on the validity of the format is
        made; the function relies on the underlying datetime parsing
        and will fail if the string cannot be converted.
``MM/DD/YYYY HH:MM MM/DD/YYYY HH:MM``
:returns: True on success and False otherwise
"""
if self.dedicated_time is None:
self.parse_dedicated_time()
if start is not None and end is not None:
dtime_from = time.strftime("%m/%d/%Y %H:%M", time.localtime(start))
dtime_to = time.strftime("%m/%d/%Y %H:%M", time.localtime(end))
dedtime = dtime_from + " " + dtime_to
elif as_str is not None:
(dtime_from, dtime_to) = self.utils.convert_dedtime(as_str)
dedtime = as_str
else:
self.logger.warning("no dedicated from/to specified")
return True
for d in self.dedicated_time_as_str:
if dedtime == d:
if dtime_from is None or dtime_to is None:
self.logger.info(self.logprefix +
"dedicated time already defined")
else:
self.logger.info(self.logprefix +
"dedicated time from " + dtime_from +
" to " + dtime_to + " already defined")
return True
if dtime_from is not None and dtime_to is not None:
self.logger.info(self.logprefix +
"adding dedicated time " + dedtime)
self.dedicated_time_as_str.append(dedtime)
if dtime_from is not None and dtime_to is not None:
self.dedicated_time.append({'from': dtime_from, 'to': dtime_to})
try:
(fd, fn) = self.du.mkstemp()
for l in self.dedicated_time_as_str:
os.write(fd, l + '\n')
os.close(fd)
ddfile = os.path.join(self.pbs_conf['PBS_HOME'], 'sched_priv',
'dedicated_time')
self.du.run_copy(self.hostname, fn, ddfile, mode=0644, uid=0,
gid=0, sudo=True)
os.remove(fn)
except:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
if hup:
ret = self.signal('-HUP')
if ret['rc'] != 0:
raise PbsSchedConfigError(rc=1, rv=False,
msg='error adding dedicated time')
return True
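    # Illustrative sketch (assumed usage): the two calling styles described
    # above, with example times.
    #
    #     now = int(time.time())
    #     sched.add_dedicated_time(start=now + 3600, end=now + 7200)
    #     sched.add_dedicated_time('04/01/2016 06:00 04/01/2016 18:00')
    #
    # The first form formats the epoch times itself; the second string is
    # parsed with convert_dedtime() and appended verbatim.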
def terminate(self):
self.signal('-KILL')
def valgrind(self):
"""
run scheduler instance through valgrind
"""
if self.isUp():
self.terminate()
rv = CliUtils().check_bin('valgrind')
if not rv:
self.logger.error(self.logprefix + 'valgrind not available')
return None
cmd = ['valgrind']
cmd += ["--log-file=" + os.path.join(tempfile.gettempdir(),
'schd.vlgrd')]
cmd += [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_sched')]
return self.du.run_cmd(self.hostname, cmd, sudo=True)
def alloc_to_execvnode(self, chunks):
"""
convert a resource allocation to an execvnode string representation
"""
execvnode = []
for chunk in chunks:
execvnode += ["(" + chunk.vnode]
for res, val in chunk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
for vchk in chunk.vchunk:
execvnode += ["+" + vchk.vnode]
                for res, val in vchk.resources.items():
execvnode += [":" + str(res) + "=" + str(val)]
execvnode += [")+"]
if len(execvnode) != 0:
ev = execvnode[len(execvnode) - 1]
ev = ev[:-1]
execvnode[len(execvnode) - 1] = ev
return "".join(execvnode)
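    # Illustrative sketch: for two chunks on vnodes vn0 and vn1, each with
    # ncpus=2, the string assembled above is
    #     (vn0:ncpus=2)+(vn1:ncpus=2)
    # i.e. the same layout PBS reports in a job's exec_vnode attribute.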
def cycles(self, start=None, end=None, firstN=None, lastN=None):
"""
Analyze scheduler log and return cycle information
:param start: Optional setting of the start time to consider
:param end: Optional setting of the end time to consider
:param firstN: Optional setting to consider the given first
N cycles
:param lastN: Optional setting to consider only the given
last N cycles
"""
try:
from ptl.utils.pbs_logutils import PBSSchedulerLog
except:
self.logger.error('error loading ptl.utils.pbs_logutils')
return None
sl = PBSSchedulerLog()
sl.analyze(self.logfile, start, end, self.hostname)
cycles = sl.cycles
if not cycles or cycles is None:
return []
if lastN is not None:
return cycles[-lastN:]
elif firstN is not None:
return cycles[:firstN]
return cycles
def query_fairshare(self, name=None, id=None):
"""
        Parse fairshare data using ``pbsfs`` and populate
        fairshare_tree. If name or id is specified, return the data
        associated with that entity. Otherwise return the entire
        fairshare tree.
"""
if self.has_diag:
return None
tree = FairshareTree()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False)
if ret['rc'] != 0:
raise PbsFairshareError(rc=ret['rc'], rv=None,
msg=str(ret['err']))
pbsfs = ret['out']
for p in pbsfs:
m = self.fs_tag.match(p)
if m:
usage = int(m.group('Usage'))
perc = float(m.group('Perc'))
nm = m.group('name')
cgrp = int(m.group('cgrp'))
pid = int(m.group('Grp'))
nd = tree.get_node(id=pid)
if nd:
pname = nd.parent_name
else:
pname = None
# if an entity has a negative cgroup it should belong
# to the unknown resource, we work around the fact that
# PBS Pro (up to 13.0) sets this cgroup id to -1 by
# reassigning it to 0
# TODO: cleanup once PBS code is updated
if cgrp < 0:
cgrp = 0
node = FairshareNode(name=nm,
id=cgrp,
parent_id=pid,
parent_name=pname,
nshares=int(m.group('Shares')),
usage=usage,
perc={'TREEROOT': perc})
if perc:
node.prio['TREEROOT'] = float(usage) / perc
if nm == name or id == cgrp:
return node
tree.add_node(node, apply=False)
# now that all nodes are known, update parent and child
# relationship of the tree
tree.update()
for node in tree.nodes.values():
pnode = node._parent
while pnode is not None and pnode.id != 0:
if pnode.perc['TREEROOT']:
node.perc[pnode.name] = \
(node.perc['TREEROOT'] * 100 / pnode.perc[
'TREEROOT'])
if pnode.name in node.perc and node.perc[pnode.name]:
node.prio[pnode.name] = (
node.usage / node.perc[pnode.name])
pnode = pnode._parent
if name:
n = tree.get_node(name)
if n is None:
raise PbsFairshareError(rc=1, rv=None,
msg='Unknown entity ' + name)
return n
        if id:
            n = tree.get_node(id=id)
            if n is None:
                raise PbsFairshareError(rc=1, rv=None,
                                        msg='Unknown entity ' + str(id))
            return n
return tree
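    # Illustrative sketch (assumed usage): query the whole tree or a single
    # entity; the entity name is an example.
    #
    #     tree = sched.query_fairshare()             # full FairshareTree
    #     node = sched.query_fairshare(name='usr1')  # one FairshareNode
    #     usage, perc = node.usage, node.perc['TREEROOT']
    #
    # This shells out to pbsfs, so it returns None when running from a diag.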
def set_fairshare_usage(self, name=None, usage=None):
"""
Set the fairshare usage associated to a given entity.
:param name: The entity to set the fairshare usage of
:type name: str or None
:param usage: The usage value to set
"""
if self.has_diag:
return True
if name is None:
self.logger.error(self.logprefix + ' an entity name required')
return False
if usage is None:
self.logger.error(self.logprefix + ' a usage is required')
return False
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-s', name, str(usage)]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
def decay_fairshare_tree(self):
"""
Decay the fairshare tree through pbsfs
"""
if self.has_diag:
return True
self.stop()
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-d']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
self.start()
if ret['rc'] == 0:
return True
return False
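    # Illustrative sketch (assumed usage): seed usage for an entity before a
    # fairshare test, then age the tree; 'usr1' and 1000 are example values.
    # Both helpers stop and restart the scheduler around the pbsfs call.
    #
    #     sched.set_fairshare_usage('usr1', 1000)
    #     sched.decay_fairshare_tree()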
def cmp_fairshare_entities(self, name1=None, name2=None):
"""
Compare two fairshare entities. Wrapper of ``pbsfs -c e1 e2``
:param name1: name of first entity to compare
:type name1: str or None
:param name2: name of second entity to compare
:type name1: str or None
:returns: the name of the entity of higher priority or None on error
"""
if self.has_diag:
return None
if name1 is None or name2 is None:
            self.logger.error(self.logprefix + 'two fairshare entity names ' +
                              'required')
return None
pbsfs = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbsfs')
cmd = [pbsfs, '-c', name1, name2]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True)
if ret['rc'] == 0:
return ret['out'][0]
return None
def parse_resource_group(self, hostname=None, resource_group=None):
"""
Parse the Scheduler's ``resource_group`` file
:param hostname: The name of the host from which to parse
resource_group
:type hostname: str or None
:param resource_group: The path to a resource_group file
:type resource_group: str or None
:returns: A fairshare tree
"""
if hostname is None:
hostname = self.hostname
if resource_group is None:
resource_group = self.resource_group_file
        # if has_diag is True access to sched_priv may not require su privilege
ret = self.du.cat(hostname, resource_group, sudo=(not self.has_diag))
if ret['rc'] != 0:
self.logger.error(hostname + ' error reading ' + resource_group)
tree = FairshareTree(hostname, resource_group)
root = FairshareNode('root', -1, parent_id=0, nshares=100)
tree.add_node(root, apply=False)
lines = ret['out']
for line in lines:
line = line.strip()
if not line.startswith("#") and len(line) > 0:
# could have 5th column but we only need the first 4
(name, id, parent, nshares) = line.split()[:4]
node = FairshareNode(name, id, parent_name=parent,
nshares=nshares)
tree.add_node(node, apply=False)
tree.update()
return tree
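    # Illustrative sketch: a resource_group file such as
    #     grp1    10    root    40
    #     usr1    11    grp1    60
    # parses into a FairshareTree whose root has one child (grp1), which in
    # turn has one child (usr1); the fourth column is the nshares value.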
def add_to_resource_group(self, name, id, parent, nshares):
"""
Add an entry to the resource group file
:param name: The name of the entity to add
:type name: str
:param id: The numeric identifier of the entity to add
:type id: int
:param parent: The name of the parent group
:type parent: str
:param nshares: The number of shares associated to the entity
:type nshares: int
"""
if self.resource_group is None:
self.resource_group = self.parse_resource_group(
self.hostname, self.resource_group_file)
if not self.resource_group:
self.resource_group = FairshareTree(
self.hostname, self.resource_group_file)
return self.resource_group.create_node(name, id, parent_name=parent,
nshares=nshares)
def job_formula(self, jobid=None, starttime=None, max_attempts=5):
"""
Extract formula value out of scheduler log
:param jobid: Optional, the job identifier for which to get
the formula.
:type jobid: str or int
:param starttime: The time at which to start parsing the
scheduler log
:param max_attempts: The number of attempts to search for
formula in the logs
:type max_attempts: int
        :returns: If jobid is specified, return the formula value
                  associated with that job. If no jobid is specified,
                  return a dictionary mapping job ids to formula values
"""
if jobid is None:
jobid = "(?P<jobid>.*)"
_alljobs = True
else:
if isinstance(jobid, int):
jobid = str(jobid)
_alljobs = False
formula_pat = (".*Job;" + jobid +
".*;Formula Evaluation = (?P<fval>.*)")
rv = self.log_match(formula_pat, regexp=True, starttime=starttime,
                            n='ALL', allmatch=True, max_attempts=max_attempts)
ret = {}
if rv:
for _, l in rv:
m = re.match(formula_pat, l)
if m:
if _alljobs:
jobid = m.group('jobid')
ret[jobid] = float(m.group('fval').strip())
if not _alljobs:
if jobid in ret:
return ret[jobid]
else:
return
return ret
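    # Illustrative sketch (assumed usage): after submitting jobs with a
    # job_sort_formula configured, the evaluated values can be read back
    # from the scheduler log; the job id is an example.
    #
    #     val = sched.job_formula(jobid='123.server01')  # single float
    #     allvals = sched.job_formula()                  # {jobid: value}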
class FairshareTree(object):
"""
Object representation of the Scheduler's resource_group
file and pbsfs data
:param hostname: Hostname of the machine
:type hostname: str
"""
du = DshUtils()
def __init__(self, hostname=None, resource_group=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
self.resource_group = resource_group
self.nodes = {}
self.root = None
self._next_id = -1
def update_resource_group(self):
if self.resource_group:
(fd, fn) = self.du.mkstemp()
os.write(fd, self.__str__())
os.close(fd)
ret = self.du.run_copy(self.hostname, fn, self.resource_group,
mode=0644, sudo=True)
self.du.chown(self.hostname, self.resource_group, uid=0,
gid=0, sudo=True)
os.remove(fn)
if ret['rc'] != 0:
raise PbsFairshareError(rc=1, rv=False,
msg='error updating resource group')
return True
def update(self):
for node in self.nodes.values():
if node._parent is None:
pnode = self.get_node(id=node.parent_id)
if pnode:
node._parent = pnode
if node not in pnode._child:
pnode._child.append(node)
def _add_node(self, node):
if node.name == 'TREEROOT' or node.name == 'root':
self.root = node
self.nodes[node.name] = node
if node.parent_name in self.nodes:
self.nodes[node.parent_name]._child.append(node)
node._parent = self.nodes[node.parent_name]
def add_node(self, node, apply=True):
"""
add node to the fairshare tree
"""
self._add_node(node)
if apply:
return self.update_resource_group()
return True
def create_node(self, name, id, parent_name, nshares):
"""
Add an entry to the ``resource_group`` file
:param name: The name of the entity to add
:type name: str
        :param id: The unique numeric identifier of the entity
:type id: int
:param parent: The name of the parent/group of the entity
:type parent: str
:param nshares: The number of shares assigned to this entity
:type nshares: int
:returns: True on success, False otherwise
"""
if name in self.nodes:
self.logger.warning('fairshare: node ' + name + ' already defined')
return True
self.logger.info('creating tree node: ' + name)
node = FairshareNode(name, id, parent_name=parent_name,
nshares=nshares)
self._add_node(node)
return self.update_resource_group()
def get_node(self, name=None, id=None):
"""
Return a node of the fairshare tree identified by either
name or id.
:param name: The name of the entity to query
:type name: str or None
:param id: The id of the entity to query
:returns: The fairshare information of the entity when
found, if not, returns None
.. note:: The name takes precedence over the id.
"""
for node in self.nodes.values():
if name is not None and node.name == name:
return node
if id is not None and node.id == id:
return node
return None
def __batch_status__(self):
"""
Convert fairshare tree object to a batch status format
"""
dat = []
for node in self.nodes.values():
if node.name == 'root':
continue
einfo = {}
einfo['cgroup'] = node.id
einfo['id'] = node.name
einfo['group'] = node.parent_id
einfo['nshares'] = node.nshares
if len(node.prio) > 0:
p = []
for k, v in node.prio.items():
p += ["%s:%d" % (k, int(v))]
einfo['penalty'] = ", ".join(p)
einfo['usage'] = node.usage
if node.perc:
p = []
for k, v in node.perc.items():
p += ["%s:%.3f" % (k, float(v))]
einfo['shares_perc'] = ", ".join(p)
ppnode = self.get_node(id=node.parent_id)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppnode = self.get_node(name=node.parent_name)
if ppnode:
ppname = ppnode.name
ppid = ppnode.id
else:
ppname = ''
ppid = None
einfo['parent'] = "%s (%s) " % (str(ppid), ppname)
dat.append(einfo)
return dat
def get_next_id(self):
self._next_id -= 1
return self._next_id
def __repr__(self):
return self.__str__()
def _dfs(self, node, dat):
if node.name != 'root':
s = []
if node.name is not None:
s += [node.name]
if node.id is not None:
s += [str(node.id)]
if node.parent_name is not None:
s += [node.parent_name]
if node.nshares is not None:
s += [str(node.nshares)]
if node.usage is not None:
s += [str(node.usage)]
dat.append("\t".join(s))
for n in node._child:
self._dfs(n, dat)
def __str__(self):
dat = []
if self.root:
self._dfs(self.root, dat)
if len(dat) > 0:
dat += ['\n']
return "\n".join(dat)
class FairshareNode(object):
"""
Object representation of the fairshare data as queryable through
the command ``pbsfs``.
:param nshares: Number of shares
:type nshares: int or None
:param usage: Fairshare usage
:param perc: Percentage the entity has of the tree
"""
def __init__(self, name=None, id=None, parent_name=None, parent_id=None,
nshares=None, usage='unknown', perc=None):
self.name = name
self.id = id
self.parent_name = parent_name
self.parent_id = parent_id
self.nshares = nshares
self.usage = usage
self.perc = perc
self.prio = {}
self._parent = None
self._child = []
def __str__(self):
ret = []
if self.name is not None:
ret.append(self.name)
if self.id is not None:
ret.append(str(self.id))
if self.parent_name is not None:
ret.append(str(self.parent_name))
if self.nshares is not None:
ret.append(str(self.nshares))
if self.usage is not None:
ret.append(str(self.usage))
if self.perc is not None:
ret.append(str(self.perc))
return "\t".join(ret)
class MoM(PBSService):
"""
Container for MoM properties.
Provides various MoM operations, such as creation, insertion,
deletion of vnodes.
:param name: The hostname of the server. Defaults to calling
pbs_default()
:type name: str or None
:param attrs: Dictionary of attributes to set, these will
override defaults.
:type attrs: Dictionary
:param pbsconf_file: path to config file to parse for
``PBS_HOME``, ``PBS_EXEC``, etc
:type pbsconf_file: str or None
:param diagmap: A dictionary of PBS objects ``(node,server,etc)``
to mapped files from PBS diag directory
:type diagmap: Dictionary
    :param diag: path to PBS diag directory (This overrides
                 diagmap)
:type diag: str or None
:param server: A PBS server instance to which this mom is associated
    :param db_access: set to either file containing credentials to DB
access or dictionary containing
{'dbname':...,'user':...,'port':...}
:type db_access: str or dictionary
"""
dflt_attributes = {}
conf_to_cmd_map = {'PBS_MOM_SERVICE_PORT': '-M',
'PBS_MANAGER_SERVICE_PORT': '-R',
'PBS_HOME': '-d'}
def __init__(self, name=None, attrs={}, pbsconf_file=None, diagmap={},
diag=None, server=None, db_access=None):
self.logger = logging.getLogger(__name__)
if server is not None:
self.server = server
if diag is None and self.server.diag is not None:
diag = self.server.diag
if (len(diagmap) == 0) and (len(self.server.diagmap) != 0):
diagmap = self.server.diagmap
else:
self.server = Server(name, pbsconf_file=pbsconf_file,
db_access=db_access, diag=diag,
diagmap=diagmap)
PBSService.__init__(self, name, attrs, self.dflt_attributes,
pbsconf_file, diag=diag, diagmap=diagmap)
_m = ['mom ', self.shortname]
if pbsconf_file is not None:
_m += ['@', pbsconf_file]
_m += [': ']
self.logprefix = "".join(_m)
self.pi = PBSInitServices(hostname=self.hostname,
conf=self.pbs_conf_file)
self.configd = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config.d')
self.config = {}
self.dflt_config = {'$clienthost': self.server.hostname}
self.version = None
self._is_cpuset_mom = None
def isUp(self):
"""
Check for PBS mom up
"""
return super(MoM, self)._isUp(self)
def signal(self, sig):
"""
Send signal to PBS mom
"""
self.logger.info(self.logprefix + 'sent signal ' + sig)
return super(MoM, self)._signal(sig, inst=self)
def get_pid(self):
"""
Get the PBS mom pid
"""
return super(MoM, self)._get_pid(inst=self)
def all_instance_pids(self):
"""
        Get all pids of an instance
"""
return super(MoM, self)._all_instance_pids(inst=self)
def start(self, args=None, launcher=None):
"""
Start the PBS mom
:param args: Arguments to start the mom
:type args: str or None
:param launcher: Optional utility to invoke the launch of the service
:type launcher: str or list or None
"""
if args is not None or launcher is not None:
return super(MoM, self)._start(inst=self, args=args,
cmd_map=self.conf_to_cmd_map,
launcher=launcher)
else:
try:
rv = self.pi.start_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return rv
def stop(self, sig=None):
"""
Stop the PBS mom
:param sig: Signal to stop the PBS mom
:type sig: str
"""
if sig is not None:
self.logger.info(self.logprefix + 'stopping MoM on host ' +
self.hostname)
return super(MoM, self)._stop(sig, inst=self)
else:
try:
self.pi.stop_mom()
except PbsInitServicesError as e:
raise PbsServiceError(rc=e.rc, rv=e.rv, msg=e.msg)
return True
def restart(self):
"""
Restart the PBS mom
"""
if self.isUp():
if not self.stop():
return False
return self.start()
def log_match(self, msg=None, id=None, n=50, tail=True, allmatch=False,
regexp=False, day=None, max_attempts=1, interval=1,
starttime=None, endtime=None):
"""
Match the PBS mom logs
"""
return self._log_match(self, msg, id, n, tail, allmatch, regexp, day,
max_attempts, interval, starttime, endtime)
def pbs_version(self):
"""
Get the PBS version
"""
if self.version:
return self.version
exe = os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')
version = self.du.run_cmd(self.hostname,
[exe, '--version'], sudo=True)['out']
if version:
self.logger.debug(version)
# in some cases pbs_mom --version may return multiple lines, we
# only care about the one that carries pbs_version information
for ver in version:
if 'pbs_version' in ver:
version = ver.split('=')[1].strip()
break
else:
version = self.log_match('pbs_version', tail=False)
if version:
version = version[1].strip().split('=')[1].strip()
else:
version = "unknown"
self.version = LooseVersion(version)
return self.version
def delete_vnodes(self):
rah = ATTR_rescavail + '.host'
rav = ATTR_rescavail + '.vnode'
a = {rah: self.hostname, rav: None}
try:
_vs = self.server.status(HOST, a, id=self.hostname)
except PbsStatusError:
try:
_vs = self.server.status(HOST, a, id=self.shortname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
_vs = []
else:
raise e
vs = []
for v in _vs:
if v[rav].split('.')[0] != v[rah].split('.')[0]:
vs.append(v['id'])
if len(vs) > 0:
self.server.manager(MGR_CMD_DELETE, VNODE, id=vs)
def revert_to_defaults(self, delvnodedefs=True):
"""
        1. ``Revert MoM configuration to defaults.``
        2. ``Remove epilogue and prologue``
        3. ``Delete all vnode definitions and HUP MoM``
:param delvnodedefs: if True (the default) delete all vnode
definitions and restart the MoM
:type delvnodedefs: bool
:returns: True on success and False otherwise
"""
self.logger.info(self.logprefix +
'reverting configuration to defaults')
restart = False
if not self.has_diag:
self.delete_pelog()
if delvnodedefs and self.has_vnode_defs():
restart = True
if not self.delete_vnode_defs():
return False
self.delete_vnodes()
if cmp(self.config, self.dflt_config) != 0:
self.apply_config(self.dflt_config, hup=False, restart=False)
if restart:
self.restart()
else:
self.signal('-HUP')
return self.isUp()
return True
def save_configuration(self, outfile, mode='a'):
"""
Save a MoM ``mom_priv/config``
        :param outfile: the output file to which configuration is
                        saved
:type outfile: str
:param mode: the mode in which to open outfile to save
configuration.
:type mode: str
:returns: True on success, False on error
.. note:: first object being saved should open this file
with 'w' and subsequent calls from other objects
                  should save with mode 'a' or 'a+'. Defaults to 'a'.
"""
conf = {}
mconf = {MGR_OBJ_NODE: conf}
mpriv = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv')
cf = os.path.join(mpriv, 'config')
self._save_config_file(conf, cf)
if os.path.isdir(os.path.join(mpriv, 'config.d')):
for f in os.listdir(os.path.join(mpriv, 'config.d')):
self._save_config_file(conf,
os.path.join(mpriv, 'config.d', f))
try:
f = open(outfile, mode)
cPickle.dump(mconf, f)
f.close()
except:
self.logger.error('error saving configuration to ' + outfile)
return False
return True
def load_configuration(self, infile):
"""
load configuration from saved file infile
"""
self._load_configuration(infile, MGR_OBJ_NODE)
def is_cray(self):
"""
Returns True if the version of PBS used was built for Cray platforms
"""
rv = self.log_match("alps_client", tail=False, max_attempts=10)
if rv:
return True
return False
def is_cpuset_mom(self):
"""
Check for cpuset mom
"""
if self._is_cpuset_mom is not None:
return self._is_cpuset_mom
raa = ATTR_rescavail + '.arch'
a = {raa: None}
try:
rv = self.server.status(NODE, a, id=self.shortname)
except PbsStatusError:
try:
rv = self.server.status(NODE, a, id=self.hostname)
except PbsStatusError as e:
if e.msg[0].endswith('Server has no node list'):
return False
else:
raise e
if rv[0][raa] == 'linux_cpuset':
self._is_cpuset_mom = True
else:
self._is_cpuset_mom = False
return self._is_cpuset_mom
def create_vnode_def(self, name, attrs={}, numnodes=1, sharednode=True,
pre='[', post=']', usenatvnode=False, attrfunc=None,
vnodes_per_host=1):
"""
Create a vnode definition string representation
:param name: The prefix for name of vnode to create,
name of vnode will be prefix + pre + <num> +
post
:type name: str
:param attrs: Dictionary of attributes to set on each vnode
:type attrs: Dictionary
:param numnodes: The number of vnodes to create
:type numnodes: int
:param sharednode: If true vnodes are shared on a host
:type sharednode: bool
:param pre: The symbol preceding the numeric value of that
vnode.
:type pre: str
:param post: The symbol following the numeric value of that
vnode.
:type post: str
:param usenatvnode: use the natural vnode as the first vnode
                            to allocate; this only makes sense
                            starting with PBS 11.3 when natural
                            vnodes are reported as allocatable
:type usenatvnode: bool
:param attrfunc: function to customize the attributes,
signature is (name, numnodes, curnodenum,
attrs), must return a dict that contains
new or modified attrs that will be added to
the vnode def. The function is called once
per vnode being created, it does not modify
attrs itself across calls.
:param vnodes_per_host: number of vnodes per host
:type vnodes_per_host: int
:returns: A string representation of the vnode definition
file
"""
sethost = False
attribs = attrs.copy()
if not sharednode and 'resources_available.host' not in attrs:
sethost = True
if attrfunc is None:
customattrs = attribs
vdef = ["$configversion 2"]
# altering the natural vnode information
if numnodes == 0:
for k, v in attribs.items():
vdef += [name + ": " + str(k) + "=" + str(v)]
else:
if usenatvnode:
if attrfunc:
customattrs = attrfunc(name, numnodes, "", attribs)
for k, v in customattrs.items():
vdef += [self.shortname + ": " + str(k) + "=" + str(v)]
# account for the use of the natural vnode
numnodes -= 1
else:
# ensure that natural vnode is not allocatable by the scheduler
vdef += [self.shortname + ": resources_available.ncpus=0"]
vdef += [self.shortname + ": resources_available.mem=0"]
for n in xrange(numnodes):
vnid = name + pre + str(n) + post
if sethost:
if vnodes_per_host > 1:
if n % vnodes_per_host == 0:
_nid = vnid
else:
_nid = name + pre + str(n - n % vnodes_per_host) + post
attribs['resources_available.host'] = _nid
else:
attribs['resources_available.host'] = vnid
if attrfunc:
customattrs = attrfunc(vnid, numnodes, n, attribs)
for k, v in customattrs.items():
vdef += [vnid + ": " + str(k) + "=" + str(v)]
if numnodes == 0:
nn = 1
else:
nn = numnodes
if numnodes > 1:
vnn_msg = ' vnodes '
else:
vnn_msg = ' vnode '
self.logger.info(self.logprefix + 'created ' + str(nn) +
vnn_msg + name + ' with attr ' +
str(attribs) + ' on host ' + self.hostname)
vdef += ["\n"]
del attribs
return "\n".join(vdef)
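    # Illustrative sketch: with name='vn' and the default pre='[', post=']',
    # the vnodes produced by the definition above are named vn[0], vn[1], ...
    # and, when sharednode is False, each gets its own
    # resources_available.host value.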
def parse_config(self):
"""
Parse mom config file into a dictionary of configuration
options.
:returns: A dictionary of configuration options on success,
and None otherwise
"""
try:
mconf = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv',
'config')
ret = self.du.cat(self.hostname, mconf, sudo=True)
if ret['rc'] != 0:
self.logger.error('error parsing configuration file')
return None
self.config = {}
lines = ret['out']
for line in lines:
(k, v) = line.split()
if k in self.config:
if isinstance(self.config[k], list):
self.config[k].append(v)
else:
self.config[k] = [self.config[k], v]
else:
self.config[k] = v
except:
self.logger.error('error in parse_config')
return None
return self.config
def add_config(self, conf={}, hup=True):
"""
Add config options to mom_priv_config.
:param conf: The configurations to add to ``mom_priv/config``
:type conf: Dictionary
:param hup: If True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
doconfig = False
if not self.config:
self.parse_config()
mc = self.config
if mc is None:
mc = {}
for k, v in conf.items():
if k in mc and (mc[k] == v or (isinstance(v, list) and
mc[k] in v)):
self.logger.debug(self.logprefix + 'config ' + k +
' already set to ' + str(v))
continue
else:
doconfig = True
break
if not doconfig:
return True
self.logger.info(self.logprefix + "config " + str(conf))
return self.apply_config(conf, hup)
def unset_mom_config(self, name, hup=True):
"""
Delete a mom_config entry
:param name: The entry to remove from ``mom_priv/config``
:type name: String
:param hup: if True (default) ``HUP`` the MoM
:type hup: bool
:returns: True on success and False otherwise
"""
mc = self.parse_config()
if mc is None or name not in mc:
return True
self.logger.info(self.logprefix + "unsetting config " + name)
del mc[name]
return self.apply_config(mc, hup)
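    # Illustrative sketch (assumed usage): typical mom_priv/config edits
    # through the two helpers above; the directive and value are examples.
    #
    #     mom.add_config({'$logevent': '0xffffffff'})
    #     mom.unset_mom_config('$logevent')
    #
    # Both calls HUP the MoM unless hup=False is passed.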
def apply_config(self, conf={}, hup=True, restart=False):
"""
Apply configuration options to MoM.
:param conf: A dictionary of configuration options to apply
to MoM
:type conf: Dictionary
:param hup: If True (default) , HUP the MoM to apply the
configuration
:type hup: bool
:returns: True on success and False otherwise.
"""
self.config = dict(self.config.items() + conf.items())
try:
(_, fn) = self.du.mkstemp()
f = open(fn, 'w+')
for k, v in self.config.items():
if isinstance(v, list):
for eachprop in v:
f.write(str(k) + ' ' + str(eachprop) + '\n')
else:
f.write(str(k) + ' ' + str(v) + '\n')
f.close()
dest = os.path.join(
self.pbs_conf['PBS_HOME'], 'mom_priv', 'config')
self.du.run_copy(self.hostname, fn, dest, mode=0644, sudo=True)
self.du.chown(self.hostname, path=dest, uid=0, gid=0, sudo=True)
os.remove(fn)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg='error processing add_config')
if restart:
return self.restart()
elif hup:
return self.signal('-HUP')
return True
def get_vnode_def(self, vnodefile=None):
"""
:returns: A vnode def file as a single string
"""
if vnodefile is None:
return None
f = open(vnodefile)
lines = f.readlines()
f.close()
return "".join(lines)
def insert_vnode_def(self, vdef, fname=None, additive=False, restart=True):
"""
Insert and enable a vnode definition. Root privilege
is required
:param vdef: The vnode definition string as created by
create_vnode_def
:type vdef: str
:param fname: The filename to write the vnode def string to
:type fname: str or None
:param additive: If True, keep all other vnode def files
under config.d Default is False
:type additive: bool
:param restart: If True, restart the MoM. Default is True
:type restart: bool
"""
try:
(fd, fn) = self.du.mkstemp(self.hostname)
os.write(fd, vdef)
os.close(fd)
except:
raise PbsMomConfigError(rc=1, rv=False,
msg="Failed to insert vnode definition")
if fname is None:
fname = 'pbs_vnode.def'
if not additive:
self.delete_vnode_defs()
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'insert', fname, fn]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
self.du.rm(hostname=self.hostname, path=fn, force=True)
if ret['rc'] != 0:
raise PbsMomConfigError(rc=1, rv=False, msg="\n".join(ret['err']))
msg = self.logprefix + 'inserted vnode definition file '
msg += fname + ' on host: ' + self.hostname
self.logger.info(msg)
if restart:
self.restart()
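    # Illustrative sketch (assumed usage): carve a host into 4 shared vnodes
    # and push the definition to the MoM; `mom` is an existing MoM instance
    # and the attribute value is an example.
    #
    #     vdef = mom.create_vnode_def('vn', numnodes=4,
    #                                 attrs={'resources_available.ncpus': 2})
    #     mom.insert_vnode_def(vdef)   # writes the file and restarts MoM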
def has_vnode_defs(self):
"""
Check for vnode definition(s)
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] == 0:
files = [x for x in ret['out'] if not x.startswith('PBS')]
if len(files) > 0:
return True
else:
return False
else:
return False
def delete_vnode_defs(self, vdefname=None):
"""
delete vnode definition(s) on this MoM
:param vdefname: name of a vnode definition file to delete,
if None all vnode definitions are deleted
:type vdefname: str
:returns: True if delete succeed otherwise False
"""
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin', 'pbs_mom')]
cmd += ['-s', 'list']
ret = self.du.run_cmd(self.hostname, cmd, sudo=True, logerr=False,
level=logging.INFOCLI)
if ret['rc'] != 0:
return False
rv = True
if len(ret['out']) > 0:
for vnodedef in ret['out']:
vnodedef = vnodedef.strip()
if (vnodedef == vdefname) or vdefname is None:
if vnodedef.startswith('PBS'):
continue
cmd = [os.path.join(self.pbs_conf['PBS_EXEC'], 'sbin',
'pbs_mom')]
cmd += ['-s', 'remove', vnodedef]
ret = self.du.run_cmd(self.hostname, cmd, sudo=True,
logerr=False, level=logging.INFOCLI)
if ret['rc'] != 0:
return False
else:
rv = True
return rv
def has_pelog(self, filename=None):
"""
Check for prologue and epilogue
"""
_has_pro = False
_has_epi = False
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
if self.du.isfile(self.hostname, path=prolog, sudo=True):
_has_pro = True
if filename == 'prologue':
return _has_pro
if self.du.isfile(self.hostname, path=epilog, sudo=True):
_has_epi = True
if filename == 'epilogue':
                return _has_epi
if _has_epi or _has_pro:
return True
return False
def has_prologue(self):
"""
Check for prologue
"""
        return self.has_pelog('prologue')
def has_epilogue(self):
"""
Check for epilogue
"""
return self.has_pelog('epilogue')
def delete_pelog(self):
"""
Delete any prologue and epilogue files that may have been
defined on this MoM
"""
phome = self.pbs_conf['PBS_HOME']
prolog = os.path.join(phome, 'mom_priv', 'prologue')
epilog = os.path.join(phome, 'mom_priv', 'epilogue')
ret = self.du.rm(self.hostname, epilog, force=True,
sudo=True, logerr=False)
if ret:
ret = self.du.rm(self.hostname, prolog, force=True,
sudo=True, logerr=False)
if not ret:
self.logger.error('problem deleting prologue/epilogue')
# we don't bail because the problem may be that files did not
# exist. Let tester fix the issue
return ret
def create_pelog(self, body=None, src=None, filename=None):
"""
create ``prologue`` and ``epilogue`` files, functionality
accepts either a body of the script or a source file.
:returns: True on success and False on error
"""
if self.has_diag:
            _msg = 'MoM is loaded from diag so bypassing pelog creation'
self.logger.info(_msg)
return False
if (src is None and body is None) or (filename is None):
self.logger.error('file and body of script are required')
return False
pelog = os.path.join(self.pbs_conf['PBS_HOME'], 'mom_priv', filename)
self.logger.info(self.logprefix +
' creating ' + filename + ' with body\n' + '---')
if body is not None:
self.logger.info(body)
(fd, src) = self.du.mkstemp(prefix='pbs-pelog')
os.write(fd, body)
os.close(fd)
elif src is not None:
_b = open(src)
self.logger.info("\n".join(_b.readlines()))
_b.close()
self.logger.info('---')
ret = self.du.run_copy(self.hostname, src, pelog, sudo=True)
if body is not None:
os.remove(src)
if ret['rc'] != 0:
self.logger.error('error creating pelog ')
return False
ret = self.du.chown(self.hostname, path=pelog, uid=0, gid=0, sudo=True,
logerr=False)
if not ret:
self.logger.error('error chowning pelog to root')
return False
ret = self.du.chmod(self.hostname, path=pelog, mode=0755, sudo=True)
if not ret:
self.logger.error('error changing mode of pelog')
return False
return True
def prologue(self, body=None, src=None):
"""
create prologue
"""
return self.create_pelog(body, src, 'prologue')
def epilogue(self, body=None, src=None):
"""
Create epilogue
"""
return self.create_pelog(body, src, 'epilogue')
def action(self, act, script):
"""
Define action script. Not currently implemented
"""
pass
class Hook(PBSObject):
"""
PBS hook objects. Holds attributes information and pointer
to server
:param name: Hook name
:type name: str or None
:param attrs: Hook attributes
:type attrs: Dictionary
:param server: Pointer to server
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
class ResourceResv(PBSObject):
"""
Generic PBS resource reservation, i.e., job or
``advance/standing`` reservation
"""
def execvnode(self, attr='exec_vnode'):
"""
PBS type execution vnode
"""
if attr in self.attributes:
return PbsTypeExecVnode(self.attributes[attr])
else:
return None
def exechost(self):
"""
PBS type execution host
"""
if 'exec_host' in self.attributes:
return PbsTypeExecHost(self.attributes['exec_host'])
else:
return None
def select(self):
if hasattr(self, '_select') and self._select is not None:
return self._select
if 'schedselect' in self.attributes:
self._select = PbsTypeSelect(self.attributes['schedselect'])
elif 'select' in self.attributes:
self._select = PbsTypeSelect(self.attributes['select'])
else:
return None
return self._select
@classmethod
def get_hosts(cls, exechost=None):
"""
:returns: The hosts portion of the exec_host
"""
hosts = []
exechosts = cls.utils.parse_exechost(exechost)
if exechosts:
for h in exechosts:
eh = h.keys()[0]
if eh not in hosts:
hosts.append(eh)
return hosts
def get_vnodes(self, execvnode=None):
"""
:returns: The unique vnode names of an execvnode as a list
"""
if execvnode is None:
if 'exec_vnode' in self.attributes:
execvnode = self.attributes['exec_vnode']
elif 'resv_nodes' in self.attributes:
execvnode = self.attributes['resv_nodes']
else:
return []
vnodes = []
execvnodes = PbsTypeExecVnode(execvnode)
if execvnodes:
for n in execvnodes:
ev = n.keys()[0]
if ev not in vnodes:
vnodes.append(ev)
return vnodes
def walltime(self, attr='Resource_List.walltime'):
if attr in self.attributes:
return self.utils.convert_duration(self.attributes[attr])
class Job(ResourceResv):
"""
PBS Job. Attributes and Resources
:param username: Job username
:type username: str or None
:param attrs: Job attributes
:type attrs: Dictionary
:param jobname: Name of the PBS job
:type jobname: str or None
"""
dflt_attributes = {
ATTR_N: 'STDIN',
ATTR_j: 'n',
ATTR_m: 'a',
ATTR_p: '0',
ATTR_r: 'y',
ATTR_k: 'oe',
}
runtime = 100
logger = logging.getLogger(__name__)
def __init__(self, username=None, attrs={}, jobname=None):
self.server = {}
self.script = None
self.script_body = None
if username is not None:
self.username = str(username)
else:
self.username = None
self.du = None
self.interactive_handle = None
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
if jobname is not None:
self.custom_attrs[ATTR_N] = jobname
self.attributes[ATTR_N] = jobname
self.set_variable_list(self.username)
self.set_sleep_time(100)
def set_variable_list(self, user=None, workdir=None):
"""
Customize the ``Variable_List`` job attribute to ``<user>``
"""
if user is None:
userinfo = pwd.getpwuid(os.getuid())
user = userinfo[0]
homedir = userinfo[5]
else:
try:
homedir = pwd.getpwnam(user)[5]
except:
homedir = ""
self.username = user
s = ['PBS_O_HOME=' + homedir]
s += ['PBS_O_LANG=en_US.UTF-8']
s += ['PBS_O_LOGNAME=' + user]
s += ['PBS_O_PATH=/usr/bin:/bin:/usr/bin:/usr/local/bin']
s += ['PBS_O_MAIL=/var/spool/mail/' + user]
s += ['PBS_O_SHELL=/bin/bash']
s += ['PBS_O_SYSTEM=Linux']
if workdir is not None:
wd = workdir
else:
wd = os.getcwd()
s += ['PBS_O_WORKDIR=' + str(wd)]
self.attributes[ATTR_v] = ",".join(s)
self.set_attributes()
def set_sleep_time(self, duration):
"""
Set the sleep duration for this job.
:param duration: The duration, in seconds, to sleep
:type duration: int
"""
self.set_execargs('/bin/sleep', duration)
def set_execargs(self, executable, arguments=None):
"""
Set the executable and arguments to use for this job
:param executable: path to an executable. No checks are made.
:type executable: str
:param arguments: arguments to executable.
:type arguments: str or list or int
"""
msg = ['job: executable set to ' + str(executable)]
if arguments is not None:
msg += [' with arguments: ' + str(arguments)]
self.logger.info("".join(msg))
self.attributes[ATTR_executable] = executable
if arguments is not None:
args = ''
xml_beginargs = '<jsdl-hpcpa:Argument>'
xml_endargs = '</jsdl-hpcpa:Argument>'
if isinstance(arguments, list):
for a in arguments:
args += xml_beginargs + str(a) + xml_endargs
elif isinstance(arguments, str):
args = xml_beginargs + arguments + xml_endargs
elif isinstance(arguments, int):
args = xml_beginargs + str(arguments) + xml_endargs
self.attributes[ATTR_Arglist] = args
else:
self.unset_attributes([ATTR_Arglist])
self.set_attributes()
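    # Illustrative sketch (assumed usage): a job that runs a command instead
    # of the default sleep; the username, attributes and server object are
    # examples and a PTL Server instance with the usual submit() helper is
    # assumed.
    #
    #     j = Job('pbsuser', attrs={'Resource_List.select': '1:ncpus=1'})
    #     j.set_sleep_time(20)              # /bin/sleep 20
    #     j.set_execargs('/bin/hostname')   # or run an arbitrary command
    #     jid = server.submit(j)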
def create_script(self, body=None, uid=None, gid=None, hostname=None):
"""
Create a job script from a given body of text into a
temporary location
:param body: the body of the script
        :param uid: Optional user id to own this script, defaults to
                    the current user
        :param gid: Optional group id to own this script
:param hostname: The host on which the job script is to
be created
:type hostname: str or None
"""
if body is None:
return None
if isinstance(body, list):
body = '\n'.join(body)
self.script_body = body
if self.du is None:
self.du = DshUtils()
# First create the temporary file as current user and only change
# its mode once the current user has written to it
(fd, fn) = self.du.mkstemp(hostname, prefix='PtlPbsJobScript', uid=uid,
gid=gid, mode=0755, body=body)
os.close(fd)
if not self.du.is_localhost(hostname):
self.du.run_copy(hostname, fn, fn)
self.script = fn
return fn
class Reservation(ResourceResv):
"""
PBS Reservation. Attributes and Resources
:param attrs: Reservation attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, username=None, attrs={}):
self.server = {}
self.script = None
self.attributes = attrs
if username is None:
userinfo = pwd.getpwuid(os.getuid())
self.username = userinfo[0]
else:
self.username = str(username)
# These are not in dflt_attributes because of the conversion to CLI
# options is done strictly
if ATTR_resv_start not in attrs:
attrs[ATTR_resv_start] = str(int(time.time()) + 36 * 3600)
if ATTR_resv_end not in attrs:
if ATTR_resv_duration not in attrs:
attrs[ATTR_resv_end] = str(int(time.time()) + 72 * 3600)
PBSObject.__init__(self, None, attrs, self.dflt_attributes)
self.set_attributes()
def set_variable_list(self, user, workdir=None):
pass
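    # Illustrative sketch (assumed usage): an advance reservation starting in
    # one hour and lasting one hour; the username is an example and a PTL
    # Server instance with the usual submit() helper is assumed.
    #
    #     now = int(time.time())
    #     r = Reservation('pbsuser', attrs={ATTR_resv_start: now + 3600,
    #                                       ATTR_resv_end: now + 7200})
    #     rid = server.submit(r)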
class InteractiveJob(threading.Thread):
"""
An Interactive Job thread
Interactive Jobs are submitted as a thread that sets the jobid
as soon as it is returned by ``qsub -I``, such that the caller
can get back to monitoring the state of PBS while the interactive
session goes on in the thread.
The commands to be run within an interactive session are
specified in the job's interactive_script attribute as a list of
tuples, where the first item in each tuple is the command to run,
and the subsequent items are the expected returned data.
Implementation details:
Support for interactive jobs is currently done through the
pexpect module which must be installed separately from PTL.
Interactive jobs are submitted through ``CLI`` only, there is no
API support for this operation yet.
The submission of an interactive job requires passing in job
    attributes, the command to execute ``(i.e. path to qsub -I)``
and the hostname
when not impersonating:
pexpect spawns the ``qsub -I`` command and expects a prompt
        back; for each tuple in the interactive_script, it sends the
command and expects to match the return value.
when impersonating:
pexpect spawns ``sudo -u <user> qsub -I``. The rest is as
        described in non-impersonating mode.
"""
logger = logging.getLogger(__name__)
pexpect_timeout = 15
pexpect_sleep_time = .1
du = DshUtils()
def __init__(self, job, cmd, host):
threading.Thread.__init__(self)
self.job = job
self.cmd = cmd
self.jobid = None
self.hostname = host
def run(self):
"""
Run the interactive job
"""
try:
import pexpect
except:
self.logger.error('pexpect module is required for '
'interactive jobs')
return None
job = self.job
cmd = self.cmd
self.jobid = None
self.logger.info("submit interactive job as " + job.username +
": " + " ".join(cmd))
if not hasattr(job, 'interactive_script'):
self.logger.debug('no interactive_script attribute on job')
return None
try:
# sleep to allow server to communicate with client
# this value is set empirically so tweaking may be
# needed
_st = self.pexpect_sleep_time
_to = self.pexpect_timeout
_sc = job.interactive_script
cmd = ['sudo', '-u', job.username] + cmd
self.logger.debug(cmd)
_p = pexpect.spawn(" ".join(cmd), timeout=_to)
self.job.interactive_handle = _p
time.sleep(_st)
_p.expect('qsub: waiting for job (?P<jobid>[\d\w.]+) to start.*')
if _p.match:
self.jobid = _p.match.group('jobid')
else:
_p.close()
self.job.interactive_handle = None
return None
self.logger.debug(_p.after.decode())
for _l in _sc:
self.logger.debug('sending: ' + _l[0])
_p.sendline(_l[0])
time.sleep(_st)
# only way I could figure out to catch a sleep command
# within a spawned pexpect child. Might need revisiting
if 'sleep' in _l[0]:
_secs = _l[0].split()[1]
self.logger.debug('sleeping ' + str(_secs))
time.sleep(float(_secs))
if len(_l) > 1:
for _r in range(1, len(_l)):
self.logger.debug('expecting: ' + _l[_r])
_p.expect(_l[_r])
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
time.sleep(_st)
self.logger.debug('received: ' + _p.after.decode())
self.logger.debug('sending Ctrl-D')
_p.sendcontrol('d')
time.sleep(_st)
_p.close()
self.job.interactive_handle = None
self.logger.debug(_p.exitstatus)
except Exception:
self.logger.error(traceback.print_exc())
return None
return self.jobid
class Queue(PBSObject):
"""
PBS Queue container, holds attributes of the queue and
pointer to server
:param name: Queue name
:type name: str or None
:param attrs: Queue attributes
:type attrs: Dictionary
"""
dflt_attributes = {}
def __init__(self, name=None, attrs={}, server=None):
self.logger = logging.getLogger(__name__)
PBSObject.__init__(self, name, attrs, self.dflt_attributes)
self.server = server
m = ['queue']
if server is not None:
m += ['@' + server.shortname]
if self.name is not None:
m += [' ', self.name]
m += [': ']
self.logprefix = "".join(m)
def revert_to_defaults(self):
"""
reset queue attributes to defaults
"""
ignore_attrs = ['id', ATTR_count, ATTR_rescassn]
ignore_attrs += [ATTR_qtype, ATTR_enable, ATTR_start, ATTR_total]
ignore_attrs += ['THE_END']
len_attrs = len(ignore_attrs)
unsetlist = []
setdict = {}
self.logger.info(
self.logprefix +
"reverting configuration to defaults")
if self.server is not None:
self.server.status(QUEUE, id=self.name, level=logging.DEBUG)
for k in self.attributes.keys():
for i in range(len_attrs):
if k.startswith(ignore_attrs[i]):
break
if (i == (len_attrs - 1)) and k not in self.dflt_attributes:
unsetlist.append(k)
if len(unsetlist) != 0 and self.server is not None:
try:
self.server.manager(MGR_CMD_UNSET, MGR_OBJ_QUEUE, unsetlist,
self.name)
except PbsManagerError, e:
self.logger.error(e.msg)
for k in self.dflt_attributes.keys():
if (k not in self.attributes or
self.attributes[k] != self.dflt_attributes[k]):
setdict[k] = self.dflt_attributes[k]
if len(setdict.keys()) != 0 and self.server is not None:
self.server.manager(MGR_CMD_SET, MGR_OBJ_QUEUE, setdict)
class PBSInitServices(object):
"""
PBS initialization services
:param hostname: Machine hostname
:type hostname: str or None
    :param conf: PBS configuration file
:type conf: str or None
"""
def __init__(self, hostname=None, conf=None):
self.logger = logging.getLogger(__name__)
self.hostname = hostname
if self.hostname is None:
self.hostname = socket.gethostname()
self.dflt_conf_file = os.environ.get('PBS_CONF_FILE', '/etc/pbs.conf')
self.conf_file = conf
self.du = DshUtils()
self.is_sunos = sys.platform.startswith('sunos')
self.is_aix = sys.platform.startswith('aix')
self.is_linux = sys.platform.startswith('linux')
def initd(self, hostname=None, op='status', conf_file=None,
init_script=None, daemon='all'):
"""
Run the init script for a given operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param op: one of status, start, stop, restart
:type op: str
:param conf_file: optional path to a configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if hostname is None:
hostname = self.hostname
if conf_file is None:
conf_file = self.conf_file
return self._unix_initd(hostname, op, conf_file, init_script, daemon)
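    # Example (illustrative only, hostname is made up): restarting just the
    # MoM daemon on another host could look like
    #
    #     pi = PBSInitServices(hostname='node1')
    #     pi.initd(op='restart', daemon='mom')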
def restart(self, hostname=None, init_script=None):
"""
Run the init script for a restart operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script)
def restart_server(self, hostname=None, init_script=None):
"""
Run the init script for a restart server
:param hostname: hostname on which to restart server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='server')
def restart_mom(self, hostname=None, init_script=None):
"""
Run the init script for a restart mom
:param hostname: hostname on which to restart mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='mom')
def restart_sched(self, hostname=None, init_script=None):
"""
Run the init script for a restart sched
:param hostname: hostname on which to restart sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='sched')
def restart_comm(self, hostname=None, init_script=None):
"""
Run the init script for a restart comm
:param hostname: hostname on which to restart comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='restart', init_script=init_script,
daemon='comm')
def start(self, hostname=None, init_script=None):
"""
Run the init script for a start operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script)
def start_server(self, hostname=None, init_script=None):
"""
Run the init script for a start server
:param hostname: hostname on which to start server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='server')
def start_mom(self, hostname=None, init_script=None):
"""
Run the init script for a start mom
:param hostname: hostname on which to start mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='mom')
def start_sched(self, hostname=None, init_script=None):
"""
Run the init script for a start sched
:param hostname: hostname on which to start sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='sched')
def start_comm(self, hostname=None, init_script=None):
"""
Run the init script for a start comm
:param hostname: hostname on which to start comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='start', init_script=init_script,
daemon='comm')
def stop(self, hostname=None, init_script=None):
"""
Run the init script for a stop operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script)
def stop_server(self, hostname=None, init_script=None):
"""
Run the init script for a stop server
:param hostname: hostname on which to stop server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='server')
def stop_mom(self, hostname=None, init_script=None):
"""
Run the init script for a stop mom
:param hostname: hostname on which to stop mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='mom')
def stop_sched(self, hostname=None, init_script=None):
"""
Run the init script for a stop sched
:param hostname: hostname on which to stop sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='sched')
def stop_comm(self, hostname=None, init_script=None):
"""
Run the init script for a stop comm
:param hostname: hostname on which to stop comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='stop', init_script=init_script,
daemon='comm')
def status(self, hostname=None, init_script=None):
"""
Run the init script for a status operation
:param hostname: hostname on which to execute the init script
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script)
def status_server(self, hostname=None, init_script=None):
"""
Run the init script for a status server
:param hostname: hostname on which to status server
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='server')
def status_mom(self, hostname=None, init_script=None):
"""
Run the init script for a status mom
:param hostname: hostname on which to status mom
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='mom')
def status_sched(self, hostname=None, init_script=None):
"""
Run the init script for a status sched
:param hostname: hostname on which to status sched
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='sched')
def status_comm(self, hostname=None, init_script=None):
"""
Run the init script for a status comm
:param hostname: hostname on which to status comm
:type hostname: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
"""
return self.initd(hostname, op='status', init_script=init_script,
daemon='comm')
def _unix_initd(self, hostname, op, conf_file, init_script, daemon):
"""
Helper function for initd ``(*nix version)``
:param hostname: hostname on which init script should run
:type hostname: str
:param op: Operation on daemons - start, stop, restart or status
        :type op: str
:param conf_file: Optional path to the pbs configuration file
:type conf_file: str or None
:param init_script: optional path to a PBS init script
:type init_script: str or None
:param daemon: name of daemon to operate on. one of server, mom,
sched, comm or all
:type daemon: str
"""
if daemon is not None and daemon != 'all':
conf = self.du.parse_pbs_config(hostname, conf_file)
dconf = {
'PBS_START_SERVER': 0,
'PBS_START_MOM': 0,
'PBS_START_SCHED': 0,
'PBS_START_COMM': 0
}
if daemon == 'server' and conf.get('PBS_START_SERVER', 0) != 0:
dconf['PBS_START_SERVER'] = 1
elif daemon == 'mom' and conf.get('PBS_START_MOM', 0) != 0:
dconf['PBS_START_MOM'] = 1
elif daemon == 'sched' and conf.get('PBS_START_SCHED', 0) != 0:
dconf['PBS_START_SCHED'] = 1
elif daemon == 'comm' and conf.get('PBS_START_COMM', 0) != 0:
dconf['PBS_START_COMM'] = 1
(fd, fn) = self.du.mkstemp(hostname)
os.close(fd)
self.du.set_pbs_config(hostname, fin=conf_file, fout=fn,
confs=dconf)
init_cmd = ['PBS_CONF_FILE=' + fn]
_as = True
else:
fn = None
if (conf_file is not None) and (conf_file != self.dflt_conf_file):
init_cmd = ['PBS_CONF_FILE=' + conf_file]
_as = True
else:
init_cmd = []
_as = False
conf = self.du.parse_pbs_config(hostname, conf_file)
if (init_script is None) or (not init_script.startswith('/')):
if 'PBS_EXEC' not in conf:
msg = 'Missing PBS_EXEC setting in pbs config'
raise PbsInitServicesError(rc=1, rv=False, msg=msg)
if init_script is None:
init_script = os.path.join(conf['PBS_EXEC'], 'libexec',
'pbs_init.d')
else:
init_script = os.path.join(conf['PBS_EXEC'], 'etc',
init_script)
if not self.du.isfile(hostname, path=init_script, sudo=True):
# Could be Type 3 installation where we will not have
# PBS_EXEC/libexec/pbs_init.d
return []
init_cmd += [init_script, op]
msg = 'running init script to ' + op + ' pbs'
if daemon is not None and daemon != 'all':
msg += ' ' + daemon
msg += ' on ' + hostname
if conf_file is not None:
msg += ' using ' + conf_file
msg += ' init_cmd=%s' % (str(init_cmd))
self.logger.info(msg)
ret = self.du.run_cmd(hostname, init_cmd, sudo=True, as_script=_as,
logerr=False)
if ret['rc'] != 0:
raise PbsInitServicesError(rc=ret['rc'], rv=False,
msg='\n'.join(ret['err']))
else:
return ret
def switch_version(self, hostname=None, version=None):
"""
Switch to another version of PBS installed on the system
:param hostname: The hostname to operate on
:type hostname: str or None
:param version: version to switch
"""
pbs_conf = self.du.parse_pbs_config(hostname)
if 'PBS_EXEC' in pbs_conf:
dn = os.path.dirname(pbs_conf['PBS_EXEC'])
newver = os.path.join(dn, version)
ret = self.du.isdir(hostname, path=newver)
if not ret:
msg = 'no version ' + version + ' on host ' + hostname
raise PbsInitServicesError(rc=0, rv=False, msg=msg)
self.stop(hostname)
dflt = os.path.join(dn, 'default')
ret = self.du.isfile(hostname, path=dflt)
if ret:
self.logger.info('removing symbolic link ' + dflt)
self.du.rm(hostname, dflt, sudo=True, logerr=False)
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': dflt})
else:
self.du.set_pbs_config(hostname, confs={'PBS_EXEC': newver})
self.logger.info('linking ' + newver + ' to ' + dflt)
self.du.run_cmd(hostname, ['ln', '-s', newver, dflt],
sudo=True, logerr=False)
self.start(hostname)<|fim▁end|> | if c: |
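        # Illustrative call (hypothetical hostname and version string):
        #     PBSInitServices().switch_version('node1', version='19.1.2')
        # i.e. stop PBS, repoint the 'default' symlink at the requested
        # install directory, update PBS_EXEC, then start PBS again.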
<|file_name|>get.js<|end_file_name|><|fim▁begin|>var clusterpost = require("clusterpost-lib");
var path = require('path');
var Promise = require('bluebird');
var argv = require('minimist')(process.argv.slice(2));
const os = require('os');
const fs = require('fs');
var agentoptions = {
rejectUnauthorized: false
}
clusterpost.setAgentOptions(agentoptions);
const help = function(){
console.error("Help: Download tasks from the server.");
console.error("\nOptional parameters:");
console.error("--dir Output directory, default: ./out");
console.error("--status one of [DONE, RUN, FAIL, EXIT, UPLOADING, CREATE], default: DONE");
console.error("--print , if provided the information is printed only");
console.error("--delete, default false, when downloading jobs with status 'DONE', the jobs will be deleted upon completion");
console.error("--j job id, default: ");
console.error("--executable executable, default: all executables");
console.error("--email email, default: (authenticated user)");
console.error("--config_codename codename, default: clusterpost");
}
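// Illustrative invocation (not part of the original file; the executable and
// directory names below are made up):
//
//   node get.js --status DONE --dir ./results --executable my_pipeline --delete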
if(argv["h"] || argv["help"]){
help();
process.exit(1);
}
var deletejobs = false;
if(argv["delete"] !== undefined){
console.log("After successful download, jobs with status DONE will be deleted!");
deletejobs = true;
}
var userEmail = undefined;
if(argv["email"]){
userEmail = argv["email"];
}
var outputdir = "./out";
if(argv["dir"]){
outputdir = argv["dir"];
}
var status = 'DONE';
if(argv["status"]){
status = argv["status"];
}
var jobid = argv["j"];
var executable = argv["executable"];
var print = argv["print"];
console.log("Output dir", outputdir);
console.log("Status", status);
if(jobid){
console.log("jobid", jobid);
}
if(executable){
console.log("executable", executable);
}
if(print){
console.log("print", print);
}
var config_codename = 'clusterpost';
if(argv["config_codename"]){
config_codename = argv["config_codename"];
}
clusterpost.start(path.join(os.homedir(), '.' + config_codename + '.json'))
.then(function(){
if(!print){
if(!jobid){
return clusterpost.getJobs(executable, status, userEmail)
.then(function(jobs){
return Promise.map(jobs, function(job){
console.log(JSON.stringify(job, null, 2));
if(job.outputdir){
return clusterpost.getJobOutputs(job, job.outputdir)
.then(function(){
if(job.name){
console.log(job.name, "downloaded...");
}else{
console.log(job._id, "downloaded...");<|fim▁hole|> if(deletejobs){
console.log("Deleting job");
return clusterpost.deleteJob(job._id);
}
});
}else{
var joboutputdir = undefined;
if(job.name){
joboutputdir = path.join(outputdir, job.name);
}else{
joboutputdir = path.join(outputdir, job._id);
}
return clusterpost.getJobOutputs(job, joboutputdir)
.then(function(){
if(job.name){
console.log(job.name, "downloaded...");
}else{
console.log(job._id, "downloaded...");
}
if(deletejobs){
console.log("Deleting job");
return clusterpost.deleteJob(job._id);
}
});
}
},
{
concurrency: 1
});
});
}else{
return clusterpost.getDocument(jobid)
.then(function(job){
if(job.outputdir){
return clusterpost.getJobOutputs(job, job.outputdir);
}else{
var joboutputdir = undefined;
if(job.name){
joboutputdir = path.join(outputdir, job.name);
}else{
joboutputdir = path.join(outputdir, job._id);
}
return clusterpost.getJobOutputs(job, joboutputdir);
}
})
.then(function(){
console.log("job downloaded...");
if(deletejobs){
console.log("Deleting job");
return clusterpost.deleteJob(jobid);
}
});
}
}else{
if(!jobid){
return clusterpost.getJobs(executable, status, userEmail)
.then(function(jobs){
console.log(JSON.stringify(jobs, null, 2))
});
}else{
return clusterpost.getDocument(jobid)
.then(function(job){
console.log(JSON.stringify(job, null, 2))
});
}
}
})
.catch(console.error)<|fim▁end|> | }
|
<|file_name|>metrics_pb2.py<|end_file_name|><|fim▁begin|># Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chromiumos/metrics.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='chromiumos/metrics.proto',
package='chromiumos',
syntax='proto3',
serialized_options=_b('Z4go.chromium.org/chromiumos/infra/proto/go/chromiumos'),
serialized_pb=_b('\n\x18\x63hromiumos/metrics.proto\x12\nchromiumos\"i\n\x0bMetricEvent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1e\n\x16timestamp_milliseconds\x18\x02 \x01(\x03\x12\x1d\n\x15\x64uration_milliseconds\x18\x03 \x01(\x04\x12\r\n\x05gauge\x18\x04 \x01(\x04\x42\x36Z4go.chromium.org/chromiumos/infra/proto/go/chromiumosb\x06proto3')
)
_METRICEVENT = _descriptor.Descriptor(
name='MetricEvent',
full_name='chromiumos.MetricEvent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='chromiumos.MetricEvent.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp_milliseconds', full_name='chromiumos.MetricEvent.timestamp_milliseconds', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='duration_milliseconds', full_name='chromiumos.MetricEvent.duration_milliseconds', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gauge', full_name='chromiumos.MetricEvent.gauge', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[<|fim▁hole|> is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=145,
)
DESCRIPTOR.message_types_by_name['MetricEvent'] = _METRICEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MetricEvent = _reflection.GeneratedProtocolMessageType('MetricEvent', (_message.Message,), dict(
DESCRIPTOR = _METRICEVENT,
__module__ = 'chromiumos.metrics_pb2'
# @@protoc_insertion_point(class_scope:chromiumos.MetricEvent)
))
_sym_db.RegisterMessage(MetricEvent)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)<|fim▁end|> | ],
serialized_options=None, |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import res_company
# WARNING: Order of imports matters on this module, so don't put res_company
# below the other modules since it will lead to a missing column error when
# the module is initialized for the first time since there are fields with<|fim▁hole|>from . import event_mail
from . import event_type
from . import res_config_settings<|fim▁end|> | # default values wich refer to this new res.company field.
from . import event |
<|file_name|>test_azure_blobs.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import tempfile
from io import BytesIO
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import b
from libcloud.utils.py3 import basestring
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver
from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE
from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE
from libcloud.test import unittest
from libcloud.test import MockHttp, generate_random_data # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS
class AzureBlobsMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('azure_blobs')
base_headers = {}
def _UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.UNAUTHORIZED])
def _list_containers_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('list_containers_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_containers_1.xml')
else:
body = self.fixtures.load('list_containers_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container_EMPTY(self, method, url, body, headers):
if method == 'DELETE':
body = u''
return (httplib.ACCEPTED,
body,
self.base_headers,
httplib.responses[httplib.ACCEPTED])
else:
body = self.fixtures.load('list_objects_empty.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _new__container_INVALID_NAME(self, method, url, body, headers):
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
def _test_container(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'marker' not in query:
body = self.fixtures.load('list_objects_1.xml')
else:
body = self.fixtures.load('list_objects_2.xml')
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _test_container100(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_container200(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test_container200_test(self, method, url, body, headers):
body = ''
if method != 'HEAD':
return (httplib.BAD_REQUEST,
body,
self.base_headers,
httplib.responses[httplib.BAD_REQUEST])
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['content-length'] = '12345'
headers['content-type'] = 'application/zip'
headers['x-ms-blob-type'] = 'Block'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-rabbits'] = 'monkeys'
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _test2_test_list_containers(self, method, url, body, headers):
# test_get_object
body = self.fixtures.load('list_containers.xml')
headers = {'content-type': 'application/zip',
'etag': '"e31208wqsdoj329jd"',
'x-amz-meta-rabbits': 'monkeys',
'content-length': '12345',
'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
# test_create_container
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.CONFLICT])
def _new_container(self, method, url, body, headers):
# test_create_container, test_delete_container
headers = {}
if method == 'PUT':
status = httplib.CREATED
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
elif method == 'DELETE':
status = httplib.NO_CONTENT
return (status,
body,
headers,
httplib.responses[status])
def _new_container_DOESNT_EXIST(self, method, url, body, headers):
# test_delete_container
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
# test_delete_container_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body,
headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.NOT_FOUND])
def _foo_bar_container_foo_bar_object_DELETE(self, method, url, body, headers):
# test_delete_object
return (httplib.ACCEPTED,
body,
headers,
httplib.responses[httplib.ACCEPTED])
def _foo_bar_container_foo_test_upload(self, method, url, body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_block(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_page(self, method, url,
body, headers):
# test_upload_object_success
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_blocklist(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
body = ''<|fim▁hole|> return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_lease(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
action = headers['x-ms-lease-action']
rheaders = {'x-ms-lease-id': 'someleaseid'}
body = ''
if action == 'acquire':
return (httplib.CREATED,
body,
rheaders,
httplib.responses[httplib.CREATED])
else:
if headers.get('x-ms-lease-id', None) != 'someleaseid':
return (httplib.BAD_REQUEST,
body,
rheaders,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.OK,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url,
body, headers):
# test_upload_object_invalid_hash1
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
# test_upload_object_invalid_file_size
self._assert_content_length_header_is_string(headers=headers)
body = generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url,
body, headers):
# test_upload_object_invalid_file_size
self._assert_content_length_header_is_string(headers=headers)
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _assert_content_length_header_is_string(self, headers):
if 'Content-Length' in headers:
self.assertTrue(isinstance(headers['Content-Length'], basestring))
class AzureBlobsTests(unittest.TestCase):
driver_type = AzureBlobsStorageDriver
driver_args = STORAGE_AZURE_BLOBS_PARAMS
mock_response_klass = AzureBlobsMockHttp
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
self.driver_type.connectionCls.conn_class = self.mock_response_klass
self.mock_response_klass.type = None
self.driver = self.create_driver()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_invalid_credentials(self):
self.mock_response_klass.type = 'UNAUTHORIZED'
try:
self.driver.list_containers()
except InvalidCredsError:
e = sys.exc_info()[1]
self.assertEqual(True, isinstance(e, InvalidCredsError))
else:
self.fail('Exception was not thrown')
def test_list_containers_empty(self):
self.mock_response_klass.type = 'list_containers_EMPTY'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = 'list_containers'
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
containers = self.driver.list_containers()
self.assertEqual(len(containers), 4)
self.assertTrue('last_modified' in containers[1].extra)
self.assertTrue('url' in containers[1].extra)
self.assertTrue('etag' in containers[1].extra)
self.assertTrue('lease' in containers[1].extra)
self.assertTrue('meta_data' in containers[1].extra)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = 'EMPTY'
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = None
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 4)
obj = objects[1]
self.assertEqual(obj.name, 'object2.txt')
self.assertEqual(obj.hash, '0x8CFB90F1BA8CD8F')
self.assertEqual(obj.size, 1048576)
self.assertEqual(obj.container.name, 'test_container')
self.assertTrue('meta1' in obj.meta_data)
self.assertTrue('meta2' in obj.meta_data)
self.assertTrue('last_modified' in obj.extra)
self.assertTrue('content_type' in obj.extra)
self.assertTrue('content_encoding' in obj.extra)
self.assertTrue('content_language' in obj.extra)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = None
try:
self.driver.get_container(container_name='test_container100')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_container_success(self):
self.mock_response_klass.type = None
container = self.driver.get_container(
container_name='test_container200')
self.assertTrue(container.name, 'test_container200')
self.assertTrue(container.extra['etag'], '0x8CFB877BB56A6FB')
self.assertTrue(container.extra['last_modified'],
'Fri, 04 Jan 2013 09:48:06 GMT')
self.assertTrue(container.extra['lease']['status'], 'unlocked')
self.assertTrue(container.extra['lease']['state'], 'available')
self.assertTrue(container.extra['meta_data']['meta1'], 'value1')
def test_get_object_container_doesnt_exist(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = None
try:
self.driver.get_object(container_name='test_container100',
object_name='test')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_object_success(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = None
obj = self.driver.get_object(container_name='test_container200',
object_name='test')
self.assertEqual(obj.name, 'test')
self.assertEqual(obj.container.name, 'test_container200')
self.assertEqual(obj.size, 12345)
self.assertEqual(obj.hash, '0x8CFB877BB56A6FB')
self.assertEqual(obj.extra['last_modified'],
'Fri, 04 Jan 2013 09:48:06 GMT')
self.assertEqual(obj.extra['content_type'], 'application/zip')
self.assertEqual(obj.meta_data['rabbits'], 'monkeys')
def test_create_container_invalid_name(self):
# invalid container name
self.mock_response_klass.type = 'INVALID_NAME'
try:
self.driver.create_container(container_name='new--container')
except InvalidContainerNameError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = 'ALREADY_EXISTS'
try:
self.driver.create_container(container_name='new-container')
except ContainerAlreadyExistsError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_success(self):
# success
self.mock_response_klass.type = None
name = 'new-container'
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_delete_container_doesnt_exist(self):
container = Container(name='new_container', extra=None,
driver=self.driver)
self.mock_response_klass.type = 'DOESNT_EXIST'
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_not_empty(self):
self.mock_response_klass.type = None
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_container_success(self):
self.mock_response_klass.type = 'EMPTY'
AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name='test_container', extra={},
driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_delete_container_not_found(self):
self.mock_response_klass.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
            self.fail('Container does not exist but an exception was not ' +
                      'thrown')
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_invalid_file_size(self):
self.mock_response_klass.type = 'INVALID_SIZE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertFalse(result)
def test_download_object_invalid_file_already_exists(self):
self.mock_response_klass.type = 'INVALID_SIZE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
destination_path = os.path.abspath(__file__)
try:
self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except LibcloudError:
pass
else:
self.fail('Exception was not thrown')
def test_download_object_as_stream_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver_type)
stream = self.driver.download_object_as_stream(obj=obj,
chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_invalid_ex_blob_type(self):
        # An invalid ex_blob_type value is rejected by the driver with a
        # LibcloudError
file_path = os.path.abspath(__file__)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
try:
self.driver.upload_object(file_path=file_path, container=container,
object_name=object_name,
verify_hash=True,
ex_blob_type='invalid-blob')
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(str(e).lower().find('invalid blob type') != -1)
else:
self.fail('Exception was not thrown')
def test_upload_object_invalid_md5(self):
# Invalid md5 is returned by azure
self.mock_response_klass.type = 'INVALID_HASH'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
file_path = os.path.abspath(__file__)
try:
self.driver.upload_object(file_path=file_path, container=container,
object_name=object_name,
verify_hash=True)
except ObjectHashMismatchError:
pass
else:
self.fail(
'Invalid hash was returned but an exception was not thrown')
def test_upload_small_block_object_success(self):
file_path = os.path.abspath(__file__)
file_size = os.stat(file_path).st_size
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
def test_upload_big_block_object_success(self):
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_BLOCK_MAX_SIZE + 1
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
def test_upload_page_object_success(self):
self.mock_response_klass.use_param = None
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 4
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob')
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
def test_upload_page_object_failure(self):
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 2 + 1
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
try:
self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob')
except LibcloudError:
e = sys.exc_info()[1]
self.assertTrue(str(e).lower().find('not aligned') != -1)
os.remove(file_path)
def test_upload_small_block_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = os.path.abspath(__file__)
file_size = os.stat(file_path).st_size
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob',
ex_use_lease=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
self.mock_response_klass.use_param = None
def test_upload_big_block_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_BLOCK_MAX_SIZE * 2
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='BlockBlob',
ex_use_lease=False)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
self.mock_response_klass.use_param = None
def test_upload_page_object_success_with_lease(self):
self.mock_response_klass.use_param = 'comp'
file_path = tempfile.mktemp(suffix='.jpg')
file_size = AZURE_PAGE_CHUNK_SIZE * 4
with open(file_path, 'w') as file_hdl:
file_hdl.write('0' * file_size)
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=False,
ex_blob_type='PageBlob',
ex_use_lease=True)
self.assertEqual(obj.name, 'foo_test_upload')
self.assertEqual(obj.size, file_size)
self.assertTrue('some-value' in obj.meta_data)
os.remove(file_path)
self.mock_response_klass.use_param = None
def test_upload_blob_object_via_stream(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
iterator = BytesIO(b('345'))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='BlockBlob')
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
self.mock_response_klass.use_param = None
def test_upload_blob_object_via_stream_with_lease(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
iterator = BytesIO(b('345'))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='BlockBlob',
ex_use_lease=True)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
self.mock_response_klass.use_param = None
def test_upload_page_object_via_stream(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
blob_size = AZURE_PAGE_CHUNK_SIZE
iterator = BytesIO(b('1' * blob_size))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='PageBlob',
ex_page_blob_size=blob_size)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, blob_size)
self.mock_response_klass.use_param = None
def test_upload_page_object_via_stream_with_lease(self):
self.mock_response_klass.use_param = 'comp'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
object_name = 'foo_test_upload'
blob_size = AZURE_PAGE_CHUNK_SIZE
iterator = BytesIO(b('1' * blob_size))
extra = {'content_type': 'text/plain'}
obj = self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
ex_blob_type='PageBlob',
ex_page_blob_size=blob_size,
ex_use_lease=True)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, blob_size)
def test_delete_object_not_found(self):
self.mock_response_klass.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
try:
self.driver.delete_object(obj=obj)
except ObjectDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_object_success(self):
self.mock_response_klass.type = 'DELETE'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
meta_data=None, container=container, driver=self.driver)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
def test_storage_driver_host(self):
# Non regression tests for issue LIBCLOUD-399 dealing with the bad
# management of the connectionCls.host class attribute
driver1 = self.driver_type('fakeaccount1', 'deadbeafcafebabe==')
driver2 = self.driver_type('fakeaccount2', 'deadbeafcafebabe==')
driver3 = self.driver_type('fakeaccount3', 'deadbeafcafebabe==',
host='test.foo.bar.com')
host1 = driver1.connection.host
host2 = driver2.connection.host
host3 = driver3.connection.host
self.assertEqual(host1, 'fakeaccount1.blob.core.windows.net')
self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net')
self.assertEqual(host3, 'test.foo.bar.com')
if __name__ == '__main__':
sys.exit(unittest.main())<|fim▁end|> | headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
|
<|file_name|>deprecation.py<|end_file_name|><|fim▁begin|>import warnings<|fim▁hole|>
def __new__(cls, value, *args, **kwargs):
return super(DeprecatedCallableStr, cls).__new__(cls, value)
def __init__(self, value, warning, warning_cls):
self.warning, self.warning_cls = warning, warning_cls
def __call__(self, *args, **kwargs):
warnings.warn(self.warning, self.warning_cls, stacklevel=2)
return str(self)
def __repr__(self):
super_repr = super(DeprecatedCallableStr, self).__repr__()
return '<DeprecatedCallableStr {}>'.format(super_repr)<|fim▁end|> |
class DeprecatedCallableStr(str):
do_no_call_in_templates = True |
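# Rough usage sketch (assumed, not in the original module): wrap a deprecated
# string-valued setting so reading it still works while calling it emits a
# warning.
#
#     OLD_SETTING = DeprecatedCallableStr(
#         'new-value', 'OLD_SETTING is deprecated, use NEW_SETTING',
#         DeprecationWarning)
#     OLD_SETTING()   # returns 'new-value' and emits a DeprecationWarning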
<|file_name|>openacademy_course.py<|end_file_name|><|fim▁begin|>'''
This module defines the Course model.
'''
from openerp import api, fields, models, _
class Course(models.Model):
'''
    This class creates the Course model.
'''
_name = 'openacademy.course' # Model odoo name
name = fields.Char(string='Title', required=True) # Field reserved
description = fields.Text(string='Description')
responsible_id = fields.Many2one('res.users',
ondelete='set null',
string="Responsible", index=True)
session_ids = fields.One2many('openacademy.session', 'course_id',
string="Sessions")
_sql_constraints = [
('name_description_check',
'CHECK(name != description)',
_("The title of the course should not be the description")),
('name_unique',
'UNIQUE(name)',
_("The course title must be unique")),
]
    @api.one  # api.one sends default params: cr, uid, id, context
def copy(self, default=None):<|fim▁hole|> # print "estoy pasando por la funcion heredada de copy en cursos"
if default is None:
default = {}
# default['name'] = self.name + ' (copy)'
copied_count = self.search_count(
[('name', '=like', _(u"Copy of {}%").format(self.name))])
if not copied_count:
new_name = _(u"Copy of {}").format(self.name)
else:
new_name = _(u"Copy of {} ({})").format(self.name, copied_count)
default['name'] = new_name
return super(Course, self).copy(default)<|fim▁end|> | |
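        # Naming behaviour sketch (illustrative): duplicating a course titled
        # "Functional Programming" yields "Copy of Functional Programming";
        # duplicating it again yields "Copy of Functional Programming (1)",
        # based on the search_count() above.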
<|file_name|>index.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | export { FilterDisableHiddenStateComponent } from './filter-disable-hidden-state.component'; |
<|file_name|>status.cpp<|end_file_name|><|fim▁begin|>/***************************************************************
*
* Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
* University of Wisconsin-Madison, WI.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************/
#include "condor_common.h"
#include "condor_config.h"
#include "condor_state.h"
#include "condor_api.h"
#include "status_types.h"
#include "totals.h"
#include "get_daemon_name.h"
#include "daemon.h"
#include "dc_collector.h"
#include "extArray.h"
#include "sig_install.h"
#include "string_list.h"
#include "condor_string.h" // for strnewp()
#include "match_prefix.h" // is_arg_colon_prefix
#include "print_wrapped_text.h"
#include "error_utils.h"
#include "condor_distribution.h"
#include "condor_version.h"
#include <vector>
#include <sstream>
#include <iostream>
using std::vector;
using std::string;
using std::stringstream;
struct SortSpec {
string arg;
string keyAttr;
string keyExprAttr;
ExprTree* expr;
ExprTree* exprLT;
ExprTree* exprEQ;
SortSpec(): arg(), keyAttr(), keyExprAttr(), expr(NULL), exprLT(NULL), exprEQ(NULL) {}
~SortSpec() {
if (NULL != expr) delete expr;
if (NULL != exprLT) delete exprLT;
if (NULL != exprEQ) delete exprEQ;
}
SortSpec(const SortSpec& src): expr(NULL), exprLT(NULL), exprEQ(NULL) { *this = src; }
SortSpec& operator=(const SortSpec& src) {
if (this == &src) return *this;
arg = src.arg;
keyAttr = src.keyAttr;
keyExprAttr = src.keyExprAttr;
if (NULL != expr) delete expr;
expr = src.expr->Copy();
if (NULL != exprLT) delete exprLT;
exprLT = src.exprLT->Copy();
if (NULL != exprEQ) delete exprEQ;
exprEQ = src.exprEQ->Copy();
return *this;
}
};
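// Illustrative only: each SortSpec is built from one command-line sort
// argument (e.g. something like "condor_status -sort Memory"), keeping the
// raw expression plus the derived less-than / equality expressions used when
// ordering the ads.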
// global variables
AttrListPrintMask pm;
printmask_headerfooter_t pmHeadFoot = STD_HEADFOOT;
List<const char> pm_head; // The list of headings for the mask entries
std::vector<GroupByKeyInfo> group_by_keys; // TJ 8.1.5 for future use, ignored for now.
bool explicit_format = false;
bool using_print_format = false; // hack for now so we can get standard totals when using -print-format
bool disable_user_print_files = false; // allow command line to defeat use of default user print files.
const char *DEFAULT = "<default>";
DCCollector* pool = NULL;
AdTypes type = (AdTypes) -1;
ppOption ppStyle = PP_NOTSET;
ppOption ppTotalStyle = PP_NOTSET; // used when setting PP_CUSTOM to keep track of how to do totals.
int wantOnlyTotals = 0;
int summarySize = -1;
bool expert = false;
bool wide_display = false; // when true, don't truncate field data
bool invalid_fields_empty = false; // when true, print "" instead of "[?]" for missing data
Mode mode = MODE_NOTSET;
const char * mode_constraint = NULL; // constraint set by mode
int diagnose = 0;
char* direct = NULL;
char* statistics = NULL;
char* genericType = NULL;
CondorQuery *query;
char buffer[1024];
char *myName;
vector<SortSpec> sortSpecs;
bool noSort = false; // set to true to disable sorting entirely
bool javaMode = false;
bool vmMode = false;
bool absentMode = false;
char *target = NULL;
const char * ads_file = NULL; // read classads from this file instead of querying them from the collector
ClassAd *targetAd = NULL;
ArgList projList; // Attributes that we want the server to send us
// instantiate templates
// function declarations
void usage ();
void firstPass (int, char *[]);
void secondPass (int, char *[]);
void prettyPrint(ClassAdList &, TrackTotals *);
int matchPrefix(const char *, const char *, int min_len);
int lessThanFunc(AttrList*,AttrList*,void*);
int customLessThanFunc(AttrList*,AttrList*,void*);
static bool read_classad_file(const char *filename, ClassAdList &classads, const char * constr);
extern "C" int SetSyscalls (int) {return 0;}
extern void setPPstyle (ppOption, int, const char *);<|fim▁hole|>int
main (int argc, char *argv[])
{
#if !defined(WIN32)
install_sig_handler(SIGPIPE, (SIG_HANDLER)SIG_IGN );
#endif
// initialize to read from config file
myDistro->Init( argc, argv );
myName = argv[0];
config();
dprintf_config_tool_on_error(0);
// The arguments take two passes to process --- the first pass
// figures out the mode, after which we can instantiate the required
// query object. We add implied constraints from the command line in
// the second pass.
firstPass (argc, argv);
// if the mode has not been set, it is STARTD_NORMAL
if (mode == MODE_NOTSET) {
setMode (MODE_STARTD_NORMAL, 0, DEFAULT);
}
// instantiate query object
if (!(query = new CondorQuery (type))) {
dprintf_WriteOnErrorBuffer(stderr, true);
fprintf (stderr, "Error: Out of memory\n");
exit (1);
}
// if a first-pass setMode set a mode_constraint, apply it now to the query object
if (mode_constraint && ! explicit_format) {
query->addANDConstraint(mode_constraint);
}
// set pretty print style implied by the type of entity being queried
// but do it with default priority, so that explicitly requested options
// can override it
switch (type)
{
#ifdef HAVE_EXT_POSTGRESQL
case QUILL_AD:
setPPstyle(PP_QUILL_NORMAL, 0, DEFAULT);
break;
#endif /* HAVE_EXT_POSTGRESQL */
case DEFRAG_AD:
setPPstyle(PP_GENERIC_NORMAL, 0, DEFAULT);
break;
case STARTD_AD:
setPPstyle(PP_STARTD_NORMAL, 0, DEFAULT);
break;
case SCHEDD_AD:
setPPstyle(PP_SCHEDD_NORMAL, 0, DEFAULT);
break;
case MASTER_AD:
setPPstyle(PP_MASTER_NORMAL, 0, DEFAULT);
break;
case CKPT_SRVR_AD:
setPPstyle(PP_CKPT_SRVR_NORMAL, 0, DEFAULT);
break;
case COLLECTOR_AD:
setPPstyle(PP_COLLECTOR_NORMAL, 0, DEFAULT);
break;
case STORAGE_AD:
setPPstyle(PP_STORAGE_NORMAL, 0, DEFAULT);
break;
case NEGOTIATOR_AD:
setPPstyle(PP_NEGOTIATOR_NORMAL, 0, DEFAULT);
break;
case GRID_AD:
setPPstyle(PP_GRID_NORMAL, 0, DEFAULT);
break;
case GENERIC_AD:
setPPstyle(PP_GENERIC, 0, DEFAULT);
break;
case ANY_AD:
setPPstyle(PP_ANY_NORMAL, 0, DEFAULT);
break;
default:
setPPstyle(PP_VERBOSE, 0, DEFAULT);
}
// set the constraints implied by the mode
switch (mode) {
#ifdef HAVE_EXT_POSTGRESQL
case MODE_QUILL_NORMAL:
#endif /* HAVE_EXT_POSTGRESQL */
case MODE_DEFRAG_NORMAL:
case MODE_STARTD_NORMAL:
case MODE_MASTER_NORMAL:
case MODE_CKPT_SRVR_NORMAL:
case MODE_SCHEDD_NORMAL:
case MODE_SCHEDD_SUBMITTORS:
case MODE_COLLECTOR_NORMAL:
case MODE_NEGOTIATOR_NORMAL:
case MODE_STORAGE_NORMAL:
case MODE_GENERIC_NORMAL:
case MODE_ANY_NORMAL:
case MODE_GRID_NORMAL:
case MODE_HAD_NORMAL:
break;
case MODE_OTHER:
// tell the query object what the type we're querying is
query->setGenericQueryType(genericType);
free(genericType);
genericType = NULL;
break;
case MODE_STARTD_AVAIL:
// For now, -avail shows you machines avail to anyone.
sprintf (buffer, "%s == \"%s\"", ATTR_STATE,
state_to_string(unclaimed_state));
if (diagnose) {
printf ("Adding constraint [%s]\n", buffer);
}
query->addORConstraint (buffer);
break;
case MODE_STARTD_RUN:
sprintf (buffer, "%s == \"%s\"", ATTR_STATE,
state_to_string(claimed_state));
if (diagnose) {
printf ("Adding constraint [%s]\n", buffer);
}
query->addORConstraint (buffer);
break;
case MODE_STARTD_COD:
sprintf (buffer, "%s > 0", ATTR_NUM_COD_CLAIMS );
if (diagnose) {
printf ("Adding constraint [%s]\n", buffer);
}
query->addORConstraint (buffer);
break;
default:
break;
}
if(javaMode) {
sprintf( buffer, "%s == TRUE", ATTR_HAS_JAVA );
if (diagnose) {
printf ("Adding constraint [%s]\n", buffer);
}
query->addANDConstraint (buffer);
projList.AppendArg(ATTR_HAS_JAVA);
projList.AppendArg(ATTR_JAVA_MFLOPS);
projList.AppendArg(ATTR_JAVA_VENDOR);
projList.AppendArg(ATTR_JAVA_VERSION);
}
if(absentMode) {
sprintf( buffer, "%s == TRUE", ATTR_ABSENT );
if (diagnose) {
printf( "Adding constraint %s\n", buffer );
}
query->addANDConstraint( buffer );
projList.AppendArg( ATTR_ABSENT );
projList.AppendArg( ATTR_LAST_HEARD_FROM );
projList.AppendArg( ATTR_CLASSAD_LIFETIME );
}
if(vmMode) {
sprintf( buffer, "%s == TRUE", ATTR_HAS_VM);
if (diagnose) {
printf ("Adding constraint [%s]\n", buffer);
}
query->addANDConstraint (buffer);
projList.AppendArg(ATTR_VM_TYPE);
projList.AppendArg(ATTR_VM_MEMORY);
projList.AppendArg(ATTR_VM_NETWORKING);
projList.AppendArg(ATTR_VM_NETWORKING_TYPES);
projList.AppendArg(ATTR_VM_HARDWARE_VT);
projList.AppendArg(ATTR_VM_AVAIL_NUM);
projList.AppendArg(ATTR_VM_ALL_GUEST_MACS);
projList.AppendArg(ATTR_VM_ALL_GUEST_IPS);
projList.AppendArg(ATTR_VM_GUEST_MAC);
projList.AppendArg(ATTR_VM_GUEST_IP);
}
// second pass: add regular parameters and constraints
if (diagnose) {
printf ("----------\n");
}
secondPass (argc, argv);
// initialize the totals object
if (ppStyle == PP_CUSTOM && using_print_format) {
if (pmHeadFoot & HF_NOSUMMARY) ppTotalStyle = PP_CUSTOM;
} else {
ppTotalStyle = ppStyle;
}
TrackTotals totals(ppTotalStyle);
// fetch the query
QueryResult q;
if ((mode == MODE_STARTD_NORMAL) && (ppStyle == PP_STARTD_NORMAL)) {
projList.AppendArg("Name");
projList.AppendArg("Machine");
projList.AppendArg("Opsys");
projList.AppendArg("Arch");
projList.AppendArg("State");
projList.AppendArg("Activity");
projList.AppendArg("LoadAvg");
projList.AppendArg("Memory");
projList.AppendArg("ActvtyTime");
projList.AppendArg("MyCurrentTime");
projList.AppendArg("EnteredCurrentActivity");
} else if( ppStyle == PP_VERBOSE ) {
// Remove everything from the projection list if we're displaying
// the "long form" of the ads.
projList.Clear();
}
if( projList.Count() > 0 ) {
char **attr_list = projList.GetStringArray();
query->setDesiredAttrs(attr_list);
deleteStringArray(attr_list);
}
// if diagnose was requested, just print the query ad
if (diagnose) {
ClassAd queryAd;
// print diagnostic information about inferred internal state
setMode ((Mode) 0, 0, NULL);
setType (NULL, 0, NULL);
setPPstyle ((ppOption) 0, 0, DEFAULT);
printf ("----------\n");
q = query->getQueryAd (queryAd);
fPrintAd (stdout, queryAd);
printf ("----------\n");
fprintf (stderr, "Result of making query ad was: %d\n", q);
exit (1);
}
// Address (host:port) is taken from requested pool, if given.
char* addr = (NULL != pool) ? pool->addr() : NULL;
Daemon* requested_daemon = pool;
// If we're in "direct" mode, then we attempt to locate the daemon
// associated with the requested subsystem (here encoded by value of mode)
// In this case the host:port of pool (if given) denotes which
// pool is being consulted
if( direct ) {
Daemon *d = NULL;
switch( mode ) {
case MODE_MASTER_NORMAL:
d = new Daemon( DT_MASTER, direct, addr );
break;
case MODE_STARTD_NORMAL:
case MODE_STARTD_AVAIL:
case MODE_STARTD_RUN:
case MODE_STARTD_COD:
d = new Daemon( DT_STARTD, direct, addr );
break;
#ifdef HAVE_EXT_POSTGRESQL
case MODE_QUILL_NORMAL:
d = new Daemon( DT_QUILL, direct, addr );
break;
#endif /* HAVE_EXT_POSTGRESQL */
case MODE_SCHEDD_NORMAL:
case MODE_SCHEDD_SUBMITTORS:
d = new Daemon( DT_SCHEDD, direct, addr );
break;
case MODE_NEGOTIATOR_NORMAL:
d = new Daemon( DT_NEGOTIATOR, direct, addr );
break;
case MODE_CKPT_SRVR_NORMAL:
case MODE_COLLECTOR_NORMAL:
case MODE_LICENSE_NORMAL:
case MODE_STORAGE_NORMAL:
case MODE_GENERIC_NORMAL:
case MODE_ANY_NORMAL:
case MODE_OTHER:
case MODE_GRID_NORMAL:
case MODE_HAD_NORMAL:
// These have to go to the collector, anyway.
break;
default:
fprintf( stderr, "Error: Illegal mode %d\n", mode );
exit( 1 );
break;
}
// Here is where we actually override 'addr', if we can obtain
// address of the requested daemon/subsys. If it can't be
// located, then fail with error msg.
// 'd' will be null (unset) if mode is one of above that must go to
// collector (MODE_ANY_NORMAL, MODE_COLLECTOR_NORMAL, etc)
if (NULL != d) {
if( d->locate() ) {
addr = d->addr();
requested_daemon = d;
} else {
const char* id = d->idStr();
if (NULL == id) id = d->name();
dprintf_WriteOnErrorBuffer(stderr, true);
if (NULL == id) id = "daemon";
fprintf(stderr, "Error: Failed to locate %s\n", id);
fprintf(stderr, "%s\n", d->error());
exit( 1 );
}
}
}
ClassAdList result;
CondorError errstack;
if (NULL != ads_file) {
MyString req; // query requirements
q = query->getRequirements(req);
const char * constraint = req.empty() ? NULL : req.c_str();
if (read_classad_file(ads_file, result, constraint)) {
q = Q_OK;
}
} else if (NULL != addr) {
// this case executes if pool was provided, or if in "direct" mode with
// subsystem that corresponds to a daemon (above).
// Here 'addr' represents either the host:port of requested pool, or
// alternatively the host:port of daemon associated with requested subsystem (direct mode)
q = query->fetchAds (result, addr, &errstack);
} else {
// otherwise obtain list of collectors and submit query that way
CollectorList * collectors = CollectorList::create();
q = collectors->query (*query, result, &errstack);
delete collectors;
}
// if any error was encountered during the query, report it and exit
if (Q_OK != q) {
dprintf_WriteOnErrorBuffer(stderr, true);
// we can always provide these messages:
fprintf( stderr, "Error: %s\n", getStrQueryResult(q) );
fprintf( stderr, "%s\n", errstack.getFullText(true).c_str() );
if ((NULL != requested_daemon) && ((Q_NO_COLLECTOR_HOST == q) ||
(requested_daemon->type() == DT_COLLECTOR)))
{
// Specific long message if connection to collector failed.
const char* fullhost = requested_daemon->fullHostname();
if (NULL == fullhost) fullhost = "<unknown_host>";
const char* daddr = requested_daemon->addr();
if (NULL == daddr) daddr = "<unknown>";
char info[1000];
sprintf(info, "%s (%s)", fullhost, daddr);
printNoCollectorContact( stderr, info, !expert );
} else if ((NULL != requested_daemon) && (Q_COMMUNICATION_ERROR == q)) {
// more helpful message for failure to connect to some daemon/subsys
const char* id = requested_daemon->idStr();
if (NULL == id) id = requested_daemon->name();
if (NULL == id) id = "daemon";
const char* daddr = requested_daemon->addr();
if (NULL == daddr) daddr = "<unknown>";
fprintf(stderr, "Error: Failed to contact %s at %s\n", id, daddr);
}
// fail
exit (1);
}
if (noSort) {
// do nothing
} else if (sortSpecs.empty()) {
// default classad sorting
result.Sort((SortFunctionType)lessThanFunc);
} else {
// User requested custom sorting expressions:
// insert attributes related to custom sorting
result.Open();
while (ClassAd* ad = result.Next()) {
for (vector<SortSpec>::iterator ss(sortSpecs.begin()); ss != sortSpecs.end(); ++ss) {
ss->expr->SetParentScope(ad);
classad::Value v;
ss->expr->Evaluate(v);
stringstream vs;
// This will properly render all supported value types,
// including undefined and error, although current semantic
// pre-filters classads where sort expressions are undef/err:
vs << ((v.IsStringValue())?"\"":"") << v << ((v.IsStringValue())?"\"":"");
ad->AssignExpr(ss->keyAttr.c_str(), vs.str().c_str());
// Save the full expr in case user wants to examine on output:
ad->AssignExpr(ss->keyExprAttr.c_str(), ss->arg.c_str());
}
}
result.Open();
result.Sort((SortFunctionType)customLessThanFunc);
}
// output result
prettyPrint (result, &totals);
delete query;
return 0;
}
const CustomFormatFnTable * getCondorStatusPrintFormats();
int set_status_print_mask_from_stream (
const char * streamid,
bool is_filename,
const char ** pconstraint)
{
std::string where_expr;
std::string messages;
StringList attrs;
SimpleInputStream * pstream = NULL;
*pconstraint = NULL;
FILE *file = NULL;
if (MATCH == strcmp("-", streamid)) {
pstream = new SimpleFileInputStream(stdin, false);
} else if (is_filename) {
file = safe_fopen_wrapper_follow(streamid, "r");
if (file == NULL) {
fprintf(stderr, "Can't open select file: %s\n", streamid);
return -1;
}
pstream = new SimpleFileInputStream(file, true);
} else {
pstream = new StringLiteralInputStream(streamid);
}
ASSERT(pstream);
int err = SetAttrListPrintMaskFromStream(
*pstream,
*getCondorStatusPrintFormats(),
pm,
pmHeadFoot,
group_by_keys,
where_expr,
attrs,
messages);
delete pstream; pstream = NULL;
if ( ! err) {
if ( ! where_expr.empty()) {
*pconstraint = pm.store(where_expr.c_str());
//if ( ! validate_constraint(*pconstraint)) {
// formatstr_cat(messages, "WHERE expression is not valid: %s\n", *pconstraint);
//}
}
// convert projection list into the format that condor status likes. because programmers.
attrs.rewind();
const char * attr;
while ((attr = attrs.next())) { projList.AppendArg(attr); }
}
if ( ! messages.empty()) { fprintf(stderr, "%s", messages.c_str()); }
return err;
}
static bool read_classad_file(const char *filename, ClassAdList &classads, const char * constr)
{
bool success = false;
FILE* file = safe_fopen_wrapper_follow(filename, "r");
if (file == NULL) {
fprintf(stderr, "Can't open file of job ads: %s\n", filename);
return false;
} else {
CondorClassAdFileParseHelper parse_helper("\n");
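// Read ads from the file one at a time; keep an ad only when it parses
// cleanly and, if a constraint was given, that constraint evaluates to
// true for the ad.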
for (;;) {
ClassAd* classad = new ClassAd();
int error;
bool is_eof;
int cAttrs = classad->InsertFromFile(file, is_eof, error, &parse_helper);
bool include_classad = cAttrs > 0 && error >= 0;
if (include_classad && constr) {
classad::Value val;
if (classad->EvaluateExpr(constr,val)) {
if ( ! val.IsBooleanValueEquiv(include_classad)) {
include_classad = false;
}
}
}
if (include_classad) {
classads.Insert(classad);
} else {
delete classad;
}
if (is_eof) {
success = true;
break;
}
if (error < 0) {
success = false;
break;
}
}
fclose(file);
}
return success;
}
void
usage ()
{
fprintf (stderr,"Usage: %s [help-opt] [query-opt] [display-opt] "
"[custom-opts ...] [name ...]\n"
" where [help-opt] is one of\n"
"\t-help\t\t\tPrint this screen and exit\n"
"\t-version\t\tPrint HTCondor version and exit\n"
"\t-diagnose\t\tPrint out query ad without performing query\n"
" and [query-opt] is one of\n"
"\t-absent\t\t\tPrint information about absent resources\n"
"\t-avail\t\t\tPrint information about available resources\n"
"\t-ckptsrvr\t\tDisplay checkpoint server attributes\n"
"\t-claimed\t\tPrint information about claimed resources\n"
"\t-cod\t\t\tDisplay Computing On Demand (COD) jobs\n"
"\t-collector\t\tDisplay collector daemon attributes\n"
"\t-debug\t\t\tDisplay debugging info to console\n"
"\t-defrag\t\t\tDisplay status of defrag daemon\n"
"\t-direct <host>\t\tGet attributes directly from the given daemon\n"
"\t-java\t\t\tDisplay Java-capable hosts\n"
"\t-vm\t\t\tDisplay VM-capable hosts\n"
"\t-license\t\tDisplay attributes of licenses\n"
"\t-master\t\t\tDisplay daemon master attributes\n"
"\t-pool <name>\t\tGet information from collector <name>\n"
"\t-ads <file>\t\tGet information from <file>\n"
"\t-grid\t\t\tDisplay grid resources\n"
"\t-run\t\t\tSame as -claimed [deprecated]\n"
#ifdef HAVE_EXT_POSTGRESQL
"\t-quill\t\t\tDisplay attributes of quills\n"
#endif /* HAVE_EXT_POSTGRESQL */
"\t-schedd\t\t\tDisplay attributes of schedds\n"
"\t-server\t\t\tDisplay important attributes of resources\n"
"\t-startd\t\t\tDisplay resource attributes\n"
"\t-generic\t\tDisplay attributes of 'generic' ads\n"
"\t-subsystem <type>\tDisplay classads of the given type\n"
"\t-negotiator\t\tDisplay negotiator attributes\n"
"\t-storage\t\tDisplay network storage resources\n"
"\t-any\t\t\tDisplay any resources\n"
"\t-state\t\t\tDisplay state of resources\n"
"\t-submitters\t\tDisplay information about request submitters\n"
// "\t-statistics <set>:<n>\tDisplay statistics for <set> at level <n>\n"
// "\t\t\t\tsee STATISTICS_TO_PUBLISH for valid <set> and level values\n"
// "\t-world\t\t\tDisplay all pools reporting to UW collector\n"
" and [display-opt] is one of\n"
"\t-long\t\t\tDisplay entire classads\n"
"\t-sort <expr>\t\tSort entries by expressions. 'no' disables sorting\n"
"\t-total\t\t\tDisplay totals only\n"
"\t-verbose\t\tSame as -long\n"
"\t-wide\t\t\tdon't truncate data to fit in 80 columns.\n"
"\t-xml\t\t\tDisplay entire classads, but in XML\n"
"\t-attributes X,Y,...\tAttributes to show in -xml or -long \n"
"\t-expert\t\t\tDisplay shorter error messages\n"
" and [custom-opts ...] are one or more of\n"
"\t-constraint <const>\tAdd constraint on classads\n"
"\t-format <fmt> <attr>\tRegister display format and attribute\n"
"\t-autoformat:[V,ntlh] <attr> [attr2 [attr3 ...]]\t Print attr(s) with automatic formatting\n"
"\t\tV\tUse %%V formatting\n"
"\t\t,\tComma separated (default is space separated)\n"
"\t\tt\tTab separated\n"
"\t\tn\tNewline after each attribute\n"
"\t\tl\tLabel each value\n"
"\t\th\tHeadings\n"
"\t-target filename\tIf -format or -af is used, the option target classad\n",
myName);
}
void
firstPass (int argc, char *argv[])
{
int had_pool_error = 0;
int had_direct_error = 0;
int had_statistics_error = 0;
//bool explicit_mode = false;
const char * pcolon = NULL;
// Process arguments: there are dependencies between them
// o -l/v and -serv are mutually exclusive
// o -sub, -avail and -run are mutually exclusive
// o -pool and -entity may be used at most once
// o since -c can be processed only after the query has been instantiated,
// constraints are added on the second pass
for (int i = 1; i < argc; i++) {
if (matchPrefix (argv[i], "-avail", 3)) {
setMode (MODE_STARTD_AVAIL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-pool", 2)) {
if( pool ) {
delete pool;
had_pool_error = 1;
}
i++;
if( ! argv[i] ) {
fprintf( stderr, "%s: -pool requires a hostname as an argument.\n",
myName );
if (!expert) {
printf("\n");
print_wrapped_text("Extra Info: The hostname should be the central "
"manager of the Condor pool you wish to work with.",
stderr);
printf("\n");
}
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
pool = new DCCollector( argv[i] );
if( !pool->addr() ) {
dprintf_WriteOnErrorBuffer(stderr, true);
fprintf( stderr, "Error: %s\n", pool->error() );
if (!expert) {
printf("\n");
print_wrapped_text("Extra Info: You specified a hostname for a pool "
"(the -pool argument). That should be the Internet "
"host name for the central manager of the pool, "
"but it does not seem to "
"be a valid hostname. (The DNS lookup failed.)",
stderr);
}
exit( 1 );
}
} else
if (is_dash_arg_prefix (argv[i], "ads", 2)) {
if( !argv[i+1] ) {
fprintf( stderr, "%s: -ads requires a filename argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
i += 1;
ads_file = argv[i];
} else
if (matchPrefix (argv[i], "-format", 2)) {
setPPstyle (PP_CUSTOM, i, argv[i]);
if( !argv[i+1] || !argv[i+2] ) {
fprintf( stderr, "%s: -format requires two other arguments\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
i += 2;
explicit_format = true;
} else
if (*argv[i] == '-' &&
(is_arg_colon_prefix(argv[i]+1, "autoformat", &pcolon, 5) ||
is_arg_colon_prefix(argv[i]+1, "af", &pcolon, 2)) ) {
// make sure we have at least one more argument
if ( !argv[i+1] || *(argv[i+1]) == '-') {
fprintf( stderr, "Error: Argument %s requires "
"at last one attribute parameter\n", argv[i] );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
explicit_format = true;
setPPstyle (PP_CUSTOM, i, argv[i]);
while (argv[i+1] && *(argv[i+1]) != '-') {
++i;
}
// if autoformat list ends in a '-' without any characters after it, just eat the arg and keep going.
if (i+1 < argc && '-' == (argv[i+1])[0] && 0 == (argv[i+1])[1]) {
++i;
}
} else
if (is_dash_arg_colon_prefix(argv[i], "print-format", &pcolon, 2)) {
if ( (i+1 >= argc) || (*(argv[i+1]) == '-' && (argv[i+1])[1] != 0)) {
fprintf( stderr, "Error: Argument -print-format requires a filename argument\n");
exit( 1 );
}
explicit_format = true;
++i; // eat the next argument.
// we can't fully parse the print format argument until the second pass, so we are done for now.
} else
if (matchPrefix (argv[i], "-wide", 3)) {
wide_display = true; // when true, don't truncate field data
//invalid_fields_empty = true;
} else
if (matchPrefix (argv[i], "-target", 5)) {
if( !argv[i+1] ) {
fprintf( stderr, "%s: -target requires one additional argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
i += 1;
target = argv[i];
FILE *targetFile = safe_fopen_wrapper_follow(target, "r");
int iseof, iserror, empty;
targetAd = new ClassAd(targetFile, "\n\n", iseof, iserror, empty);
fclose(targetFile);
} else
if (matchPrefix (argv[i], "-constraint", 4)) {
// can add constraints on second pass only
i++;
if( ! argv[i] ) {
fprintf( stderr, "%s: -constraint requires another argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
} else
if (matchPrefix (argv[i], "-direct", 4)) {
if( direct ) {
free( direct );
had_direct_error = 1;
}
i++;
if( ! argv[i] ) {
fprintf( stderr, "%s: -direct requires another argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
direct = strdup( argv[i] );
} else
if (matchPrefix (argv[i], "-diagnose", 4)) {
diagnose = 1;
} else
if (matchPrefix (argv[i], "-debug", 3)) {
// dprintf to console
dprintf_set_tool_debug("TOOL", 0);
} else
if (matchPrefix (argv[i], "-defrag", 4)) {
setMode (MODE_DEFRAG_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-help", 2)) {
usage ();
exit (0);
} else
if (matchPrefix (argv[i], "-long", 2) || matchPrefix (argv[i],"-verbose", 3)) {
setPPstyle (PP_VERBOSE, i, argv[i]);
} else
if (matchPrefix (argv[i],"-xml", 2)){
setPPstyle (PP_XML, i, argv[i]);
} else
if (matchPrefix (argv[i],"-attributes", 3)){
if( !argv[i+1] ) {
fprintf( stderr, "%s: -attributes requires one additional argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
i++;
} else
if (matchPrefix (argv[i], "-run", 2) || matchPrefix(argv[i], "-claimed", 3)) {
setMode (MODE_STARTD_RUN, i, argv[i]);
} else
if( matchPrefix (argv[i], "-cod", 4) ) {
setMode (MODE_STARTD_COD, i, argv[i]);
} else
if (matchPrefix (argv[i], "-java", 2)) {
/*explicit_mode =*/ javaMode = true;
} else
if (matchPrefix (argv[i], "-absent", 3)) {
/*explicit_mode =*/ absentMode = true;
} else
if (matchPrefix (argv[i], "-vm", 3)) {
/*explicit_mode =*/ vmMode = true;
} else
if (matchPrefix (argv[i], "-server", 3)) {
setPPstyle (PP_STARTD_SERVER, i, argv[i]);
} else
if (matchPrefix (argv[i], "-state", 5)) {
setPPstyle (PP_STARTD_STATE, i, argv[i]);
} else
if (matchPrefix (argv[i], "-statistics", 6)) {
if( statistics ) {
free( statistics );
had_statistics_error = 1;
}
i++;
if( ! argv[i] ) {
fprintf( stderr, "%s: -statistics requires another argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
statistics = strdup( argv[i] );
} else
if (matchPrefix (argv[i], "-startd", 5)) {
setMode (MODE_STARTD_NORMAL,i, argv[i]);
} else
if (matchPrefix (argv[i], "-schedd", 3)) {
setMode (MODE_SCHEDD_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-grid", 2)) {
setMode (MODE_GRID_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-subsystem", 5)) {
i++;
if( !argv[i] ) {
fprintf( stderr, "%s: -subsystem requires another argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
if (matchPrefix (argv[i], "schedd", 6)) {
setMode (MODE_SCHEDD_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "startd", 6)) {
setMode (MODE_STARTD_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "quill", 5)) {
setMode (MODE_QUILL_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "negotiator", 10)) {
setMode (MODE_NEGOTIATOR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "master", 6)) {
setMode (MODE_MASTER_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "collector", 9)) {
setMode (MODE_COLLECTOR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "generic", 7)) {
setMode (MODE_GENERIC_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "had", 3)) {
setMode (MODE_HAD_NORMAL, i, argv[i]);
} else
if (*argv[i] == '-') {
fprintf(stderr, "%s: -subsystem requires another argument\n",
myName);
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit(1);
} else {
genericType = strdup(argv[i]);
setMode (MODE_OTHER, i, argv[i]);
}
} else
#ifdef HAVE_EXT_POSTGRESQL
if (matchPrefix (argv[i], "-quill", 2)) {
setMode (MODE_QUILL_NORMAL, i, argv[i]);
} else
#endif /* HAVE_EXT_POSTGRESQL */
if (matchPrefix (argv[i], "-license", 3)) {
setMode (MODE_LICENSE_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-storage", 4)) {
setMode (MODE_STORAGE_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-negotiator", 2)) {
setMode (MODE_NEGOTIATOR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-generic", 3)) {
setMode (MODE_GENERIC_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-any", 3)) {
setMode (MODE_ANY_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-sort", 3)) {
i++;
if( ! argv[i] ) {
fprintf( stderr, "%s: -sort requires another argument\n",
myName );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
if (MATCH == strcasecmp(argv[i], "false") ||
MATCH == strcasecmp(argv[i], "0") ||
MATCH == strcasecmp(argv[i], "no") ||
MATCH == strcasecmp(argv[i], "none"))
{
noSort = true;
continue;
}
int jsort = sortSpecs.size();
SortSpec ss;
ExprTree* sortExpr = NULL;
if (ParseClassAdRvalExpr(argv[i], sortExpr)) {
fprintf(stderr, "Error: Parse error of: %s\n", argv[i]);
exit(1);
}
ss.expr = sortExpr;
ss.arg = argv[i];
formatstr(ss.keyAttr, "CondorStatusSortKey%d", jsort);
formatstr(ss.keyExprAttr, "CondorStatusSortKeyExpr%d", jsort);
string exprString;
formatstr(exprString, "MY.%s < TARGET.%s", ss.keyAttr.c_str(), ss.keyAttr.c_str());
if (ParseClassAdRvalExpr(exprString.c_str(), sortExpr)) {
fprintf(stderr, "Error: Parse error of: %s\n", exprString.c_str());
exit(1);
}
ss.exprLT = sortExpr;
formatstr(exprString, "MY.%s == TARGET.%s", ss.keyAttr.c_str(), ss.keyAttr.c_str());
if (ParseClassAdRvalExpr(exprString.c_str(), sortExpr)) {
fprintf(stderr, "Error: Parse error of: %s\n", exprString.c_str());
exit(1);
}
ss.exprEQ = sortExpr;
sortSpecs.push_back(ss);
// the silent constraint TARGET.%s =!= UNDEFINED is added
// as a customAND constraint on the second pass
} else
if (matchPrefix (argv[i], "-submitters", 5)) {
setMode (MODE_SCHEDD_SUBMITTORS, i, argv[i]);
} else
if (matchPrefix (argv[i], "-master", 2)) {
setMode (MODE_MASTER_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-collector", 4)) {
setMode (MODE_COLLECTOR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-world", 2)) {
setMode (MODE_COLLECTOR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-ckptsrvr", 3)) {
setMode (MODE_CKPT_SRVR_NORMAL, i, argv[i]);
} else
if (matchPrefix (argv[i], "-total", 2)) {
wantOnlyTotals = 1;
explicit_format = true;
} else
if (matchPrefix(argv[i], "-expert", 2)) {
expert = true;
} else
if (matchPrefix(argv[i], "-version", 4)) {
printf( "%s\n%s\n", CondorVersion(), CondorPlatform() );
exit(0);
} else
if (*argv[i] == '-') {
fprintf (stderr, "Error: Unknown option %s\n", argv[i]);
usage ();
exit (1);
}
}
if( had_pool_error ) {
fprintf( stderr,
"Warning: Multiple -pool arguments given, using \"%s\"\n",
pool->name() );
}
if( had_direct_error ) {
fprintf( stderr,
"Warning: Multiple -direct arguments given, using \"%s\"\n",
direct );
}
if( had_statistics_error ) {
fprintf( stderr,
"Warning: Multiple -statistics arguments given, using \"%s\"\n",
statistics );
}
}
void
secondPass (int argc, char *argv[])
{
const char * pcolon = NULL;
char *daemonname;
for (int i = 1; i < argc; i++) {
// omit parameters which qualify switches
if( matchPrefix(argv[i],"-pool", 2) || matchPrefix(argv[i],"-direct", 4) ) {
i++;
continue;
}
if( matchPrefix(argv[i],"-subsystem", 5) ) {
i++;
continue;
}
if (matchPrefix (argv[i], "-format", 2)) {
pm.registerFormat (argv[i+1], argv[i+2]);
StringList attributes;
ClassAd ad;
if(!ad.GetExprReferences(argv[i+2],attributes,attributes)){
fprintf( stderr, "Error: Parse error of: %s\n", argv[i+2]);
exit(1);
}
attributes.rewind();
char const *s;
while( (s=attributes.next()) ) {
projList.AppendArg(s);
}
if (diagnose) {
printf ("Arg %d --- register format [%s] for [%s]\n",
i, argv[i+1], argv[i+2]);
}
i += 2;
continue;
}
if (*argv[i] == '-' &&
(is_arg_colon_prefix(argv[i]+1, "autoformat", &pcolon, 5) ||
is_arg_colon_prefix(argv[i]+1, "af", &pcolon, 2)) ) {
// make sure we have at least one more argument
if ( !argv[i+1] || *(argv[i+1]) == '-') {
fprintf( stderr, "Error: Argument %s requires "
"at last one attribute parameter\n", argv[i] );
fprintf( stderr, "Use \"%s -help\" for details\n", myName );
exit( 1 );
}
bool flabel = false;
bool fCapV = false;
bool fheadings = false;
const char * pcolpre = " ";
const char * pcolsux = NULL;
if (pcolon) {
++pcolon;
while (*pcolon) {
switch (*pcolon)
{
case ',': pcolsux = ","; break;
case 'n': pcolsux = "\n"; break;
case 't': pcolpre = "\t"; break;
case 'l': flabel = true; break;
case 'V': fCapV = true; break;
case 'h': fheadings = true; break;
}
++pcolon;
}
}
pm.SetAutoSep(NULL, pcolpre, pcolsux, "\n");
while (argv[i+1] && *(argv[i+1]) != '-') {
++i;
ClassAd ad;
StringList attributes;
if(!ad.GetExprReferences(argv[i],attributes,attributes)){
fprintf( stderr, "Error: Parse error of: %s\n", argv[i]);
exit(1);
}
attributes.rewind();
char const *s;
while ((s = attributes.next())) {
projList.AppendArg(s);
}
MyString lbl = "";
int wid = 0;
int opts = FormatOptionNoTruncate;
if (fheadings || pm_head.Length() > 0) {
const char * hd = fheadings ? argv[i] : "(expr)";
wid = 0 - (int)strlen(hd);
opts = FormatOptionAutoWidth | FormatOptionNoTruncate;
pm_head.Append(hd);
}
else if (flabel) { lbl.formatstr("%s = ", argv[i]); wid = 0; opts = 0; }
lbl += fCapV ? "%V" : "%v";
if (diagnose) {
printf ("Arg %d --- register format [%s] width=%d, opt=0x%x for [%s]\n",
i, lbl.Value(), wid, opts, argv[i]);
}
pm.registerFormat(lbl.Value(), wid, opts, argv[i]);
}
// if autoformat list ends in a '-' without any characters after it, just eat the arg and keep going.
if (i+1 < argc && '-' == (argv[i+1])[0] && 0 == (argv[i+1])[1]) {
++i;
}
continue;
}
if (is_dash_arg_colon_prefix(argv[i], "print-format", &pcolon, 2)) {
if ( (i+1 >= argc) || (*(argv[i+1]) == '-' && (argv[i+1])[1] != 0)) {
fprintf( stderr, "Error: Argument -print-format requires a filename argument\n");
exit( 1 );
}
// hack allow -pr ! to disable use of user-default print format files.
if (MATCH == strcmp(argv[i+1], "!")) {
++i;
disable_user_print_files = true;
continue;
}
ppTotalStyle = ppStyle;
setPPstyle (PP_CUSTOM, i, argv[i]);
++i; // skip to the next argument.
if (set_status_print_mask_from_stream(argv[i], true, &mode_constraint) < 0) {
fprintf(stderr, "Error: invalid select file %s\n", argv[i]);
exit (1);
}
if (mode_constraint) {
query->addANDConstraint(mode_constraint);
}
using_print_format = true; // so we can hack totals.
continue;
}
if (matchPrefix (argv[i], "-target", 5)) {
i++;
continue;
}
if (is_dash_arg_prefix(argv[i], "ads", 2)) {
++i;
continue;
}
if( matchPrefix(argv[i], "-sort", 3) ) {
i++;
if ( ! noSort) {
sprintf( buffer, "%s =!= UNDEFINED", argv[i] );
query->addANDConstraint( buffer );
}
continue;
}
if (matchPrefix (argv[i], "-statistics", 6)) {
i += 2;
sprintf(buffer,"STATISTICS_TO_PUBLISH = \"%s\"", statistics);
if (diagnose) {
printf ("[%s]\n", buffer);
}
query->addExtraAttribute(buffer);
continue;
}
if (matchPrefix (argv[i], "-attributes", 3) ) {
// parse attributes to be selected and split them along ","
StringList more_attrs(argv[i+1],",");
char const *s;
more_attrs.rewind();
while( (s=more_attrs.next()) ) {
projList.AppendArg(s);
}
i++;
continue;
}
// figure out what the other parameters should do
if (*argv[i] != '-') {
// display extra information for diagnosis
if (diagnose) {
printf ("Arg %d (%s) --- adding constraint", i, argv[i]);
}
if( !(daemonname = get_daemon_name(argv[i])) ) {
if ( (mode==MODE_SCHEDD_SUBMITTORS) && strchr(argv[i],'@') ) {
// For a submittor query, it is possible that the
// hostname is really a UID_DOMAIN. And there is
// no requirement that UID_DOMAIN actually have
// an inverse lookup in DNS... so if get_daemon_name()
// fails with a fully qualified submittor lookup, just
// use what we are given and do not flag an error.
daemonname = strnewp(argv[i]);
} else {
dprintf_WriteOnErrorBuffer(stderr, true);
fprintf( stderr, "%s: unknown host %s\n",
argv[0], get_host_part(argv[i]) );
exit(1);
}
}
switch (mode) {
case MODE_DEFRAG_NORMAL:
case MODE_STARTD_NORMAL:
case MODE_STARTD_COD:
#ifdef HAVE_EXT_POSTGRESQL
case MODE_QUILL_NORMAL:
#endif /* HAVE_EXT_POSTGRESQL */
case MODE_SCHEDD_NORMAL:
case MODE_SCHEDD_SUBMITTORS:
case MODE_MASTER_NORMAL:
case MODE_COLLECTOR_NORMAL:
case MODE_CKPT_SRVR_NORMAL:
case MODE_NEGOTIATOR_NORMAL:
case MODE_STORAGE_NORMAL:
case MODE_ANY_NORMAL:
case MODE_GENERIC_NORMAL:
case MODE_STARTD_AVAIL:
case MODE_OTHER:
case MODE_GRID_NORMAL:
case MODE_HAD_NORMAL:
sprintf(buffer,"(%s==\"%s\") || (%s==\"%s\")",
ATTR_NAME, daemonname, ATTR_MACHINE, daemonname );
if (diagnose) {
printf ("[%s]\n", buffer);
}
query->addORConstraint (buffer);
break;
case MODE_STARTD_RUN:
sprintf (buffer,"%s == \"%s\"",ATTR_REMOTE_USER,argv[i]);
if (diagnose) {
printf ("[%s]\n", buffer);
}
query->addORConstraint (buffer);
break;
default:
fprintf(stderr,"Error: Don't know how to process %s\n",argv[i]);
}
delete [] daemonname;
daemonname = NULL;
} else
if (matchPrefix (argv[i], "-constraint", 4)) {
if (diagnose) {
printf ("[%s]\n", argv[i+1]);
}
query->addANDConstraint (argv[i+1]);
i++;
}
}
}
int
matchPrefix (const char *s1, const char *s2, int min_len)
{
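// Returns true when one of s1/s2 is a prefix of the other and the
// shorter string is at least min_len characters long.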
int lenS1 = strlen (s1);
int lenS2 = strlen (s2);
int len = (lenS1 < lenS2) ? lenS1 : lenS2;
if(len < min_len) {
return 0;
}
return (strncmp (s1, s2, len) == 0);
}
int
lessThanFunc(AttrList *ad1, AttrList *ad2, void *)
{
MyString buf1;
MyString buf2;
int val;
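// Default sort order for query results: OpSys, then Arch, then Machine,
// then Name.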
if( !ad1->LookupString(ATTR_OPSYS, buf1) ||
!ad2->LookupString(ATTR_OPSYS, buf2) ) {
buf1 = "";
buf2 = "";
}
val = strcmp( buf1.Value(), buf2.Value() );
if( val ) {
return (val < 0);
}
if( !ad1->LookupString(ATTR_ARCH, buf1) ||
!ad2->LookupString(ATTR_ARCH, buf2) ) {
buf1 = "";
buf2 = "";
}
val = strcmp( buf1.Value(), buf2.Value() );
if( val ) {
return (val < 0);
}
if( !ad1->LookupString(ATTR_MACHINE, buf1) ||
!ad2->LookupString(ATTR_MACHINE, buf2) ) {
buf1 = "";
buf2 = "";
}
val = strcmp( buf1.Value(), buf2.Value() );
if( val ) {
return (val < 0);
}
if (!ad1->LookupString(ATTR_NAME, buf1) ||
!ad2->LookupString(ATTR_NAME, buf2))
return 0;
return ( strcmp( buf1.Value(), buf2.Value() ) < 0 );
}
int
customLessThanFunc( AttrList *ad1, AttrList *ad2, void *)
{
classad::Value lt_result;
bool val;
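// Walk the user-supplied sort keys in order: if a key says ad1 < ad2,
// ad1 sorts first; if the two ads compare equal on that key, fall
// through to the next key; otherwise (or if the key cannot be
// evaluated) ad1 does not sort before ad2.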
for (unsigned i = 0; i < sortSpecs.size(); ++i) {
if (EvalExprTree(sortSpecs[i].exprLT, ad1, ad2, lt_result)
&& lt_result.IsBooleanValue(val) ) {
if( val ) {
return 1;
} else {
if (EvalExprTree( sortSpecs[i].exprEQ, ad1,
ad2, lt_result ) &&
( !lt_result.IsBooleanValue(val) || !val )){
return 0;
}
}
} else {
return 0;
}
}
return 0;
}<|fim▁end|> | extern void setType (const char *, int, const char *);
extern void setMode (Mode, int, const char *);
|
<|file_name|>DeleteOptionsUI.py<|end_file_name|><|fim▁begin|>##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferScene
##########################################################################<|fim▁hole|>
GafferScene.DeleteOptions,
"description",
"""
A node which removes options from the globals.
""",
plugs = {
"names" : [
"description",
"""
The names of options to be removed. Names should be
separated by spaces and can use Gaffer's standard wildcards.
""",
],
"invertNames" : [
"description",
"""
When on, matching names are kept, and non-matching names are removed.
""",
],
}
)<|fim▁end|> | # Metadata
##########################################################################
Gaffer.Metadata.registerNode( |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# encoding: utf-8
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()<|fim▁hole|>setup(
name="cubehelix",
version="0.1.0",
author="James Davenport",
# author_email="",
description="Cubehelix colormaps for matplotlib",
long_description=read('README.md'),
# license="BSD",
py_modules=['cubehelix'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Visualization",
# "License :: OSI Approved :: BSD License",
]
)<|fim▁end|> | |
<|file_name|>test_playaudio.py<|end_file_name|><|fim▁begin|>from nose.tools import assert_equal, assert_greater, assert_greater_equal, assert_less, assert_raises
import time
import numpy as np
import audioio.playaudio as ap
import audioio.audiomodules as am
def test_beep():
am.enable_module()
print()
print('default module...')
ap.beep(blocking=True)
ap.beep(0.5, 'a4', blocking=True)
ap.beep(blocking=False)
time.sleep(2.0)
ap.handle.close()
for lib in am.installed_modules('device'):
print('%s module...' % lib)
am.select_module(lib)
ap.beep(blocking=True, verbose=2)
ap.beep(blocking=False, verbose=2)
time.sleep(2.0)
ap.handle.close()
am.enable_module()
def test_play():
am.enable_module()
print()
# sine wave:
rate = 44100.0
t = np.arange(0.0, 0.5, 1.0/rate)
mono_data = np.sin(2.0*np.pi*800.0*t)
stereo_data = np.tile(mono_data, (2, 1)).T
# fade in and out:
ap.fade(mono_data, rate, 0.1)
ap.fade(stereo_data, rate, 0.1)
print('default module mono...')
ap.play(mono_data, rate, blocking=True)
ap.play(mono_data, rate, blocking=False)
time.sleep(2.0)
print('default module stereo...')
ap.play(stereo_data, rate, blocking=True)
ap.play(stereo_data, rate, blocking=False)
time.sleep(2.0)
ap.handle.close()
for lib in am.installed_modules('device'):
print('%s module mono...' % lib)
am.select_module(lib)
ap.play(mono_data, rate, blocking=True, verbose=2)
ap.play(mono_data, rate, blocking=False, verbose=2)
time.sleep(2.0)
print('%s module stereo...' % lib)
ap.play(stereo_data, rate, blocking=True)
ap.play(stereo_data, rate, blocking=False)
time.sleep(2.0)
ap.handle.close()<|fim▁hole|> def sinewave(rate):
t = np.arange(0.0, 0.5, 1.0/rate)
mono_data = np.sin(2.0*np.pi*800.0*t)
stereo_data = np.tile(mono_data, (2, 1)).T
# fade in and out:
ap.fade(mono_data, rate, 0.1)
ap.fade(stereo_data, rate, 0.1)
return mono_data, stereo_data
am.enable_module()
print()
for lib in am.installed_modules('device'):
am.select_module(lib)
print('%s module ...' % lib)
for rate in [45555.0, 100000.0, 600000.0]:
print(' rate %.0f Hz ...' % rate)
mono_data, stereo_data = sinewave(rate)
ap.play(mono_data, rate, verbose=2)
ap.play(stereo_data, rate, verbose=2)
ap.handle.close()
am.enable_module()
def test_note2freq():
fa = 460.0
assert_less(np.abs(ap.note2freq('a4', fa)-fa), 1e-6, 'wrong a4 frequency')
fp = 0.5*ap.note2freq('a0')
for o in range(10):
for n in 'cdefgab':
note = '%s%d' % (n, o)
f = ap.note2freq(note)
assert_greater(f, fp, 'frequency of %s should be greater than the one of previous note' % note)
note = '%s#%d' % (n, o)
fs = ap.note2freq(note)
assert_greater(fs, f, 'frequency of %s should be greater' % note)
note = '%sb%d' % (n, o)
fb = ap.note2freq(note)
assert_less(fb, f, 'frequency of %s should be less' % note)
fp = f
assert_raises(ValueError, ap.note2freq, 'h')
assert_raises(ValueError, ap.note2freq, 'da')
assert_raises(ValueError, ap.note2freq, 'dx#')
assert_raises(ValueError, ap.note2freq, 'd4#')
assert_raises(ValueError, ap.note2freq, 'd4x')
assert_raises(ValueError, ap.note2freq, 'd#4x')
assert_raises(ValueError, ap.note2freq, 'd-2')
assert_raises(ValueError, ap.note2freq, '')
assert_raises(ValueError, ap.note2freq, 0)
def test_demo():
am.enable_module()
ap.demo()
def test_main():
am.enable_module()
ap.main(['prog', '-h'])
ap.main(['prog'])
ap.main(['prog', '-m', 'sounddevice'])
ap.main(['prog', 'x'])<|fim▁end|> | am.enable_module()
def test_downsample(): |
<|file_name|>translation.py<|end_file_name|><|fim▁begin|>import unittest
import requests
class TranslationTests(unittest.TestCase):
def setUp(self):
self.url = 'http://127.0.0.1/api/translate'
def test_given_words(self):
"""Should pass for the basic test cases provided"""
test_words = ['pig', 'banana', 'trash', 'happy', 'duck', 'glove',
'eat', 'omelet', 'are']
expected_words = ['igpay', 'ananabay', 'ashtray', 'appyhay', 'uckday',
'oveglay', 'eatyay', 'omeletyay', 'areyay']
responses = [requests.post(self.url, x).text for x in test_words]
self.assertEqual(responses, expected_words,
'Should pass for the basic test cases provided')
def test_capitalization(self):
"""Should preserve capitalization in words"""
test_words = ['Capitalized', 'Words', 'Should', 'Work']
expected_words = ['Apitalizedcay', 'Ordsway', 'Ouldshay', 'Orkway']
responses = [requests.post(self.url, x).text for x in test_words]
self.assertEqual(responses, expected_words,
'Words should preserve their capitalization')<|fim▁hole|> test_sentence = ('Long sentences should retain their capitalization, '
'as well as punctuation - hopefully!!')
expected_result = ('Onglay entencessay ouldshay etainray eirthay '
'apitalizationcay, asyay ellway asyay unctuationpay'
' - opefullyhay!!')
response = requests.post(self.url, test_sentence).text
self.assertEqual(response, expected_result,
'Should translate sentences accurately')
def test_edge_cases(self):
"""Should be able to handle words with no vowels"""
test_word = 'sky'
expected_result = 'skyay'
response = requests.post(self.url, test_word).text
self.assertEqual(response, expected_result,
'Should be able to translate words without vowels')
def test_error_cases(self):
"""Should return errors for invalid input"""
self.assertEqual(requests.post(self.url, '').status_code, 406,
'Should return HTTP/406 for empty strings')
def test_long_paragraphs(self):
"""Should translate long paragraphs with new lines intact"""
self.maxDiff = None
expected_result = ''
test_paragraph = ''
with open('tests/lorem_ipsum.txt') as input_paragraph:
test_paragraph = input_paragraph.read()
with open('tests/lorem_ipsum_translated.txt') as expected:
expected_result = expected.read()
response = requests.post(self.url, test_paragraph).text
self.assertEqual(response, expected_result,
'Should translate long paragraphs accurately')
if __name__ == '__main__':
unittest.main()<|fim▁end|> |
def test_sentences(self):
"""Should translate sentences with preserved punctuation""" |
<|file_name|>group.py<|end_file_name|><|fim▁begin|>from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_group_form(self, group):
wd = self.app.wd
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)<|fim▁hole|> self.group_cache = None
def select_first_group(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def modify_first_group(self, new_group_data):
self.modify_group_by_index(0, new_group_data)
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
# open modification form
wd.find_element_by_name("edit").click()
# fill group form
self.fill_group_form(new_group_data)
# submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(id)
# open modification form
wd.find_element_by_name("edit").click()
# fill group form
self.fill_group_form(new_group_data)
# submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
group_cache = None
def get_group_list(self):
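# Lazily build the list of groups from the groups page and cache it;
# the cache is invalidated by the create/modify/delete operations above.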
if self.group_cache is None:
wd = self.app.wd
self.open_groups_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)<|fim▁end|> | # submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page() |
<|file_name|>wtf-app.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding=utf8
from flask import Flask, render_template
from flask.ext.jqueryuibootstrap import JqueryUiBootstrap
from flask.ext.wtf import (
Form,
RecaptchaField,
)
from wtforms import (
TextField,
HiddenField,
ValidationError,
)
from wtforms.validators import (
Required,
)
app = Flask(__name__)
JqueryUiBootstrap(app)
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = '6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
@app.route('/', methods=('GET', 'POST',))
def index():
form = ExampleForm()
if form.validate_on_submit():<|fim▁hole|>
if '__main__' == __name__:
app.run(debug=True)<|fim▁end|> | return "PASSED"
return render_template('example.html', form=form)
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Sylvain Afchain <[email protected]>
#<|fim▁hole|># a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.<|fim▁end|> | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain |
<|file_name|>locales.js<|end_file_name|><|fim▁begin|>SirTrevor.Locales = {
en: {
general: {
'delete': 'Delete?',
'drop': 'Drag __block__ here',
'paste': 'Or paste URL here',
'upload': '...or choose a file',
'close': 'close',
'position': 'Position',
'wait': 'Please wait...',
'link': 'Enter a link'
},
errors: {
'title': "You have the following errors:",
'validation_fail': "__type__ block is invalid",
'block_empty': "__name__ must not be empty",
'type_missing': "You must have a block of type __type__",
'required_type_empty': "A required block type __type__ is empty",
'load_fail': "There was a problem loading the contents of the document"
},
blocks: {
text: {
'title': "Text"
},
columns: {
'title': "Columns"
},
list: {
'title': "List"
},
quote: {
'title': "Quote",
'credit_field': "Credit"
},
image: {
'title': "Image",
'upload_error': "There was a problem with your upload"
},
video: {
'title': "Video"
},
tweet: {<|fim▁hole|> embedly: {
'title': "Embedly",
'fetch_error': "There was a problem fetching your embed",
'key_missing': "An Embedly API key must be present"
},
heading: {
'title': "Heading"
}
}
}
};
if (window.i18n === undefined || window.i18n.init === undefined) {
// Minimal i18n stub that only reads the English strings
SirTrevor.log("Using i18n stub");
window.i18n = {
t: function(key, options) {
var parts = key.split(':'), str, obj, part, i;
obj = SirTrevor.Locales[SirTrevor.LANGUAGE];
for(i = 0; i < parts.length; i++) {
part = parts[i];
if(!_.isUndefined(obj[part])) {
obj = obj[part];
}
}
str = obj;
if (!_.isString(str)) { return ""; }
if (str.indexOf('__') >= 0) {
_.each(options, function(value, opt) {
str = str.replace('__' + opt + '__', value);
});
}
return str;
}
};
} else {
SirTrevor.log("Using i18next");
// Only use i18next when the library has been loaded by the user, keeps
// dependencies slim
i18n.init({ resStore: SirTrevor.Locales, fallbackLng: SirTrevor.LANGUAGE,
ns: { namespaces: ['general', 'blocks'], defaultNs: 'general' }
});
}<|fim▁end|> | 'title': "Tweet",
'fetch_error': "There was a problem fetching your tweet"
}, |
<|file_name|>index.js<|end_file_name|><|fim▁begin|><|fim▁hole|> store: require('./store'),
task: require('./task')
};<|fim▁end|> | module.exports = {
error: require('./error'), |
<|file_name|>TestMergeTracker.py<|end_file_name|><|fim▁begin|>'''
Unit tests for MergeTracker.py
Verification tracking of which comps have been merged already
works as expected and produces valid models.
'''
import numpy as np
import unittest
from bnpy.learnalg import MergeTracker
class TestMergeTracker(unittest.TestCase):
def shortDescription(self):
return None
def setUp(self):<|fim▁hole|> print MT.excludeList
MT.recordResult(0, 1, True)
with self.assertRaises(AssertionError):
MT.recordResult(0, 1, True)
def test_recordMergeResult_assertRaisesWhenCompAlreadyPartOfMerge(self):
MT = MergeTracker(4)
print MT.excludeList
MT.recordResult(2, 3, True)
with self.assertRaises(AssertionError):
MT.recordResult(0, 2, False)
with self.assertRaises(AssertionError):
MT.recordResult(1, 2, False)
def test_recordMergeResult_assertRaisesOnRepeatPair2(self):
MT = MergeTracker(6)
MT.recordResult(0, 1, False)
MT.recordResult(0, 2, False)
MT.recordResult(0, 3, False)
MT.recordResult(0, 4, True)
MT.recordResult(1, 2, True)
assert len(MT.excludePairs[1]) == MT.K
with self.assertRaises(AssertionError):
MT.recordResult(1, 2, False)
def test_recordMergeResult(self):
MT = MergeTracker(6)
MT.recordResult(0, 1, False)
MT.recordResult(0, 2, False)
MT.recordResult(0, 3, False)
assert len(MT.excludeList) == 0
MT.recordResult(0, 4, True)
assert 0 in MT.excludeList
assert 1 not in MT.excludeList
MT.recordResult(1, 2, True)
assert 1 in MT.excludeList
assert 2 not in MT.excludeList
MT.recordResult(2, 3, True)
assert 2 in MT.excludeList
assert MT.K == 3
assert MT.OrigK == 6
assert (0,4) in MT.acceptedOrigIDs
assert (1,2) in MT.acceptedOrigIDs
assert (3,5) in MT.acceptedOrigIDs
def test_synchronize_catch_former_bug1(self):
''' Given un-synched excludeList and excludePairs,
verify that the synchronization will discover (correctly)
that no pairs are left
This prevents relapse of a bug captured in Jan 2013
'''
MT = MergeTracker(6)
MT.excludeList = set([0, 2, 1, 4])
MT.excludePairs[0] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[1] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[2] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[3] = set([0, 1, 2, 3, 5])
MT.excludePairs[4] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[5] = set([0, 1, 2, 3, 5])
MT._synchronize_and_verify()
for k in range(6):
assert k in MT.excludeList
assert not MT.hasAvailablePairs()
def test_synchronize_catch_former_bug2(self):
''' Given un-synched excludeList and excludePairs,
verify that the synchronization will discover (correctly)
that no pairs are left
This prevents relapse of a bug captured in Jan 2013
'''
MT = MergeTracker(6)
MT.excludeList = set([1, 4, 2, 3])
MT.excludePairs[0] = set([0, 1, 3, 4, 5])
MT.excludePairs[1] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[2] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[3] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[4] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[5] = set([0, 1, 3, 4, 5])
MT._synchronize_and_verify()
for k in range(6):
assert k in MT.excludeList
assert not MT.hasAvailablePairs()
def test_synchronize_catch_former_bug3(self):
'''
This prevents relapse of a bug captured in Jan 2013
'''
MT = MergeTracker(7)
MT.excludeList = set([3, 0, 2, 6])
MT.excludePairs[0] = set([0, 1, 2, 3, 4, 5, 6])
MT.excludePairs[1] = set([0, 1, 2, 3, 5])
MT.excludePairs[2] = set([0, 1, 2, 3, 4, 5, 6])
MT.excludePairs[3] = set([0, 1, 2, 3, 4, 5, 6])
MT.excludePairs[4] = set([0, 2, 3, 4, 5])
MT.excludePairs[5] = set([0, 1, 2, 3, 4, 5])
MT.excludePairs[6] = set([0, 1, 2, 3, 4, 5, 6])
MT._synchronize_and_verify()
assert 1 in MT.getAvailableComps()
assert 4 in MT.getAvailableComps()
assert 5 in MT.excludePairs[1]
assert 1 in MT.excludePairs[5]
assert 6 in MT.excludePairs[4]
assert 6 in MT.excludePairs[1]<|fim▁end|> | pass
def test_recordMergeResult_assertRaisesOnRepeatPair(self):
MT = MergeTracker(4) |
<|file_name|>rbf.py<|end_file_name|><|fim▁begin|>import numpy as np
import tensorflow as tf
from .module import Module
class RBFExpansion(Module):
def __init__(self, low, high, gap, dim=1, name=None):
self.low = low
self.high = high<|fim▁hole|> self.centers = np.linspace(low, high, int(np.ceil(xrange / gap)))
self.centers = self.centers[:, np.newaxis]
self.n_centers = len(self.centers)
self.fan_out = self.dim * self.n_centers
super(RBFExpansion, self).__init__(name)
def _forward(self, d):
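# Expand each input distance into Gaussian radial-basis features centered
# on the evenly spaced points in self.centers, with width set by self.gap.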
cshape = tf.shape(d)
CS = d.get_shape()
centers = self.centers.reshape((1, -1)).astype(np.float32)
d -= tf.constant(centers)
rbf = tf.exp(-(d ** 2) / self.gap)
# rbf = tf.reshape(rbf, (
# cshape[0], cshape[1], cshape[2],
# self.dim * centers.shape[-1]))
rbf.set_shape([CS[0], self.fan_out])
return rbf<|fim▁end|> | self.gap = gap
self.dim = dim
xrange = high - low |
<|file_name|>Study.js<|end_file_name|><|fim▁begin|>"use strict";
/* istanbul ignore file */
/* tslint:disable */<|fim▁hole|><|fim▁end|> | /* eslint-disable */
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=Study.js.map |
<|file_name|>setup_win.py<|end_file_name|><|fim▁begin|>from distutils.core import setup
import py2exe
import os, sys
from glob import glob
import PyQt5
data_files=[('',['C:/Python34/DLLs/sqlite3.dll','C:/Python34/Lib/site-packages/PyQt5/icuuc53.dll','C:/Python34/Lib/site-packages/PyQt5/icudt53.dll','C:/Python34/Lib/site-packages/PyQt5/icuin53.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Gui.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Core.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Widgets.dll']),
('data',['data/configure','data/model.sqlite','data/loading.jpg']),<|fim▁hole|>qt_platform_plugins = [("platforms", glob(PyQt5.__path__[0] + r'\plugins\platforms\*.*'))]
data_files.extend(qt_platform_plugins)
msvc_dlls = [('.', glob(r'''C:/Windows/System32/msvc?100.dll'''))]
data_files.extend(msvc_dlls)
setup(
windows = ["ChemDB.py"],
zipfile = None,
data_files = data_files,
options = {
'py2exe': {
'includes' : ['sip','PyQt5.QtCore','PyQt5.QtGui',"sqlite3",'xlrd','xlwt',"_sqlite3","PyQt5"],
}
},
)<|fim▁end|> | ('platforms',['C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qminimal.dll','C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qoffscreen.dll','C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qwindows.dll'])
] |
<|file_name|>parser.rs<|end_file_name|><|fim▁begin|>use std::mem;
use std::collections::HashMap;
use super::lexer::{Lexer, LexError};
use super::token::{TokenSpan, Token, Lit};
use super::ast::*; // TODO: remove * import
pub struct Parser<'a> {
lexer: Lexer<'a>,
curr: Option<TokenSpan>,
last: Option<TokenSpan>,
peek: Option<TokenSpan>,
}
impl<'a> Parser<'a> {
pub fn from_query(q: &'a str) -> Parser<'a> {
let lex = Lexer::from_query(q);
Parser {
lexer: lex,
curr: None,
last: None,
peek: None,
}
}
pub fn parse(&mut self) -> Result<Query, ParserError> {
// TODO: handle this with a better error message
try!(self.bump());
try!(self.bump());
self.parse_commands()
}
fn bump(&mut self) -> Result<(), ParserError> {
// Advance the token window: last <- curr, curr <- peek, then pull the next real token from the lexer into peek.
mem::swap(&mut self.last, &mut self.curr);
mem::swap(&mut self.curr, &mut self.peek);
self.peek = try!(self.lexer.next_real());
Ok(())
}
// SQL Commands
fn parse_commands(&mut self) -> Result<Query, ParserError> {
let curr = self.curr.clone();
// The first token must be a word naming a major command (SELECT or INSERT).
match curr.unwrap().token {
Token::Word(val) => self.run_major_command(val),
_ => Err(ParserError::FirstCmdNotWord),
}
}
fn run_major_command(&mut self, cmd: String) -> Result<Query, ParserError> {
match Keyword::from_str(&*cmd).unwrap() { // TODO: clean up unwrap
Keyword::Select => self.parse_select(),
Keyword::Insert => self.parse_insert(),
_ => Err(ParserError::FirstCmdNotMajor),
}
}
<|fim▁hole|> loop {
match self.expect_word() {
Ok(ref word) => {
cols.push(Col { name: word.to_owned() });
match try!(self.peek_clone()) {
Token::Comma => try!(self.bump()),
Token::Word(_) => break,
token => return Err(ParserError::ExpectedToken(Token::Comma, format!("{:?}", token))),
}
},
Err(err) => return Err(err),
}
}
try!(self.expect_keyword(Keyword::From));
let table = {
let name = try!(self.expect_word());
Table {
name: name,
alias: None,
}
};
try!(self.expect_token(Token::Semi));
Ok(Query::Table(TableStmt::Select(SelectStmt {
cols: cols,
table: table,
})))
}
fn parse_insert(&mut self) -> Result<Query, ParserError> {
// TODO: impl parse_insert
try!(self.expect_keyword(Keyword::Into));
let table = {
let name = try!(self.expect_word());
Table {
name: name,
alias: None,
}
};
let cols = try!(self.expect_comma_dil_word());
try!(self.expect_keyword(Keyword::Values));
let mut values = try!(self.expect_comma_dil_lit());
if cols.len() != values.len() {
return Err(ParserError::ColumsDoNotMatchValues);
}
let mut cols_map = HashMap::new();
for col in cols {
let col = Col { name: col };
cols_map.insert(col, values.remove(0));
}
try!(self.expect_token(Token::Semi));
Ok(Query::Table(TableStmt::Insert(InsertStmt {
table: table,
cols: cols_map,
})))
}
}
// Helper function
impl<'a> Parser<'a> {
fn expect_keyword(&mut self, exp: Keyword) -> Result<Keyword, ParserError> {
// TODO: clean up unwrap but they should be safe for the moment
try!(self.bump());
let curr = {
let token = &self.curr.clone().unwrap();
match &token.token {
&Token::Word(ref word) => word.clone(),
t => return Err(ParserError::ExpectedKeyword(exp, format!("{:?}", t))),
}
};
let actual = try!(Keyword::from_str(&curr));
if actual == exp {
Ok(actual)
} else {
Err(ParserError::ExpectedKeyword(exp, curr))
}
}
fn expect_token(&mut self, exp: Token) -> Result<Token, ParserError> {
try!(self.bump());
let token = self.curr.clone().unwrap();
let actual = token.token.clone();
if actual == exp {
Ok(actual)
} else {
Err(ParserError::ExpectedToken(exp, format!("{:?}", actual)))
}
}
// expect word case insensitive.
fn expect_word(&mut self) -> Result<String, ParserError> {
try!(self.bump());
let token = match self.curr.clone() {
Some(t) => t,
None => return Err(ParserError::ExpectedTokenButGotNone),
};
let actual = token.token.clone();
let word = match actual {
Token::Word(ref word) => word.to_lowercase(), // always lowercase
t => return Err(ParserError::ExpectedToken(Token::Word(String::new()), format!("{:?}", t))),
};
Ok(word)
}
fn expect_lit(&mut self) -> Result<Lit, ParserError> {
try!(self.bump());
let token = self.curr.clone().unwrap();
let actual = token.token.clone();
match actual {
Token::Literal(lit) => Ok(lit),
t => Err(ParserError::ExpectedToken(Token::Literal(Lit::String(String::new())), format!("{:?}", t))),
}
}
fn expect_comma_dil_lit(&mut self) -> Result<Vec<Lit>, ParserError> {
try!(self.expect_token(Token::ParentOP));
let mut cols = Vec::new();
loop {
match self.expect_lit() {
Ok(ref lit) => {
cols.push(lit.clone());
match try!(self.peek_clone()) {
Token::Comma => try!(self.bump()),
Token::ParentCL => {
try!(self.bump());
break;
},
token => return Err(ParserError::ExpectedToken(Token::Comma, format!("{:?}", token))),
}
},
Err(err) => return Err(err),
}
}
Ok(cols)
}
fn expect_comma_dil_word(&mut self) -> Result<Vec<String>, ParserError> {
try!(self.expect_token(Token::ParentOP));
let mut cols = Vec::new();
loop {
match self.expect_word() {
Ok(ref word) => {
cols.push(word.to_owned());
match try!(self.peek_clone()) {
Token::Comma => try!(self.bump()),
Token::ParentCL => {
try!(self.bump());
break;
},
token => return Err(ParserError::ExpectedToken(Token::Comma, format!("{:?}", token))),
}
},
Err(err) => return Err(err),
}
}
Ok(cols)
}
fn peek_clone(&mut self) -> Result<Token, ParserError> {
let peek = try!(Self::unwrap_tokenspan(self.peek.clone()));
Ok(peek.token)
}
fn unwrap_tokenspan(t: Option<TokenSpan>) -> Result<TokenSpan, ParserError> {
match t {
Some(val) => Ok(val),
None => Err(ParserError::ExpectedTokenButGotNone),
}
}
}
#[derive(Debug, PartialEq)]
pub enum Keyword {
// Major
Select,
Insert,
// Minor
From,
Into,
Values,
}
impl Keyword {
pub fn from_str(k: &str) -> Result<Keyword, ParserError> {
let keyword = match &*k.to_lowercase() {
"select" => Keyword::Select,
"insert" => Keyword::Insert,
"from" => Keyword::From,
"into" => Keyword::Into,
"values" => Keyword::Values,
// Keyword not found
keyword => return Err(ParserError::UnexpectedKeyword(keyword.to_owned())), // TODO: clean up panic
};
Ok(keyword)
}
}
#[derive(Debug)]
pub enum ParserError {
InvalidCommand,
LexerError(LexError),
FirstCmdNotWord,
FirstCmdNotMajor,
ColumsDoNotMatchValues,
ExpectedKeyword(Keyword, String), // exp, actual
ExpectedToken(Token, String), // exp, actual
ExpectedTokenButGotNone,
UnexpectedKeyword(String),
}
impl From<LexError> for ParserError {
fn from(e: LexError) -> ParserError {
ParserError::LexerError(e)
}
}
#[cfg(test)]
mod test {
use super::*;
use super::super::token::*;
use super::super::ast::*;
use std::collections::HashMap;
#[test]
fn select() {
let mut p = Parser::from_query("select name, email from users;");
let q = p.parse().unwrap();
let q_exp = Query::Table(TableStmt::Select(SelectStmt {
table: Table {
name: "users".into(),
alias: None
},
cols: vec![Col { name: "name".into() }, Col { name: "email".into() }]
}));
assert_eq!(q_exp, q);
}
#[test]
#[should_panic]
fn select_panic() {
let mut p = Parser::from_query("select name, email users;");
p.parse().unwrap();
}
#[test]
fn insert() {
let mut p = Parser::from_query("INSERT INTO users (name, email) VALUES (\"first last\", \"[email protected]\");");
let q = p.parse().unwrap();
let mut cols = HashMap::new();
cols.insert(Col { name: "name".to_owned() }, Lit::String("first last".to_owned()));
cols.insert(Col { name: "email".to_owned() }, Lit::String("[email protected]".to_owned()));
let q_exp = Query::Table(TableStmt::Insert(InsertStmt {
table: Table {
name: "users".to_owned(),
alias: None,
},
cols: cols,
}));
assert_eq!(q_exp, q);
}
#[test]
#[should_panic]
fn insert_no_table_name() {
Parser::from_query("insert into (asdf aslkdfhjahh dsfkjals)").parse().unwrap();
}
#[test]
#[should_panic]
fn insert_non_proper_col_list() {
Parser::from_query("insert into users (asdf aslkdfhjahh dsfkjals)").parse().unwrap();
}
#[test]
#[should_panic]
fn first_non_major() {
let err = Parser::from_query("alskdfj").parse();
assert!(err.is_err());
}
}<|fim▁end|> | fn parse_select(&mut self) -> Result<Query, ParserError> {
let mut cols = Vec::new();
|
<|file_name|>config.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2018 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import "time"
// Config file representation for telemetry plugin<|fim▁hole|> // Allows to disable plugin
Disabled bool `json:"disabled"`
}
// getConfig returns telemetry plugin file configuration if exists
func (p *Plugin) getConfig() (*Config, error) {
config := &Config{}
found, err := p.Cfg.LoadValue(config)
if err != nil {
return nil, err
}
if !found {
p.Log.Debug("Telemetry config not found")
return nil, nil
}
p.Log.Debug("Telemetry config found")
return config, err
}<|fim▁end|> | type Config struct {
// Custom polling interval, default value is 30s
PollingInterval time.Duration `json:"polling-interval"` |
<|file_name|>test_split_partition.py<|end_file_name|><|fim▁begin|>from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
# re.split 0, str.split -1
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_")
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=True)
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_", expand=True)
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# should preserve name
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
def test_partition_sep_kwarg(any_string_dtype):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = s.str.partition(sep="_")
result = s.str.partition("_")
tm.assert_frame_equal(result, expected)
expected = s.str.rpartition(sep="_")
result = s.str.rpartition("_")
tm.assert_frame_equal(result, expected)
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
expected = Series(["b", "d", np.nan, "g"])
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
expected = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_get_bounds():
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
# positive index
result = ser.str.split("_").str.get(2)
expected = Series(["3", "8", np.nan])<|fim▁hole|> tm.assert_series_equal(result, expected)
# negative index
result = ser.str.split("_").str.get(-3)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
def test_get_complex():
# GH 20671, getting value not in dict raising `KeyError`
ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])
result = ser.str.get(1)
expected = Series([2, 2, np.nan, "a"])
tm.assert_series_equal(result, expected)
result = ser.str.get(-1)
expected = Series([3, 3, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("to_type", [tuple, list, np.array])
def test_get_complex_nested(to_type):
ser = Series([to_type([to_type([1, 2])])])
result = ser.str.get(0)
expected = Series([to_type([1, 2])])
tm.assert_series_equal(result, expected)
result = ser.str.get(1)
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_get_strings(any_string_dtype):
ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype)
result = ser.str.get(2)
expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)<|fim▁end|> | |
<|file_name|>priorutils.py<|end_file_name|><|fim▁begin|>#significant input and copied functions from T. Morton's VESPA code (all mistakes are my own)
#coords -- RA and DEC of target in degrees. Needed for GAIA querying.
# Degrees, 0-360 and -90 to +90. List format [RA,DEC].
import numpy as np
import pandas as pd
from scipy.integrate import quad
from scipy import stats
import astropy.constants as const
import astropy.units as u
from astropy.coordinates import SkyCoord
import subprocess as sp
import os, re
import time
AU = const.au.cgs.value
RSUN = const.R_sun.cgs.value
REARTH = const.R_earth.cgs.value
MSUN = const.M_sun.cgs.value
DAY = 86400 #seconds
G = const.G.cgs.value
import logging
def semimajor(P,mtotal=1.):
"""
Returns semimajor axis in AU given P in days, total mass in solar masses.
"""
return ((P*DAY/2/np.pi)**2*G*mtotal*MSUN)**(1./3)/AU
def eclipse_probability(R1, R2, P, M1, M2):
return (R1 + R2) *RSUN / (semimajor(P , M1 + M2)*AU)
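# Illustrative sanity checks for the two helpers above (not part of the original
# module; numbers are approximate and only meant to show expected magnitudes):
#   semimajor(365.25, 1.0)                          -> ~1.0 AU (Earth-like orbit)
#   eclipse_probability(1.0, 0.1, 10.0, 1.0, 0.001) -> ~0.06
# i.e. a Jupiter-sized companion on a 10-day orbit around a solar-type star
# transits for roughly 6% of random orbital orientations.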
def centroid_PDF_source(pos,centroiddat):
cent_x, cent_y = centroiddat[0], centroiddat[1]
sig_x, sig_y = centroiddat[2], centroiddat[3]
return stats.multivariate_normal.pdf([pos[0],pos[1]],mean=[cent_x,cent_y],
cov=[[sig_x**(1/2.),0],[0,sig_y**(1/2.)]])
def bgeb_prior(centroid_val, star_density, skyarea, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.12):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def bgtp_prior(centroid_val, star_density, skyarea, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2):
'''
Centroid val is value at source (no integration over area). This allows comparison
to planet_prior without having two planet_prior functions.
'''
return centroid_val * skyarea * star_density * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
def eb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_binary=0.3, f_close=0.027):
'''
centroid pdf at source location
f_binary = 0.3 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
could improve to be average over all types?
f_close = 0.027 (moe + di stefano 2017) fraction of binaries with P between 3.2-32d
eclipse prob
works for defined source EBs too, just use appropriate centroid pdf value.
'''
return centroid_val * f_binary * f_close * eclipse_probability(r1, r2, P, m1, m2)
def heb_prior(centroid_val, P, r1=1.0, r2=1.0, m1=1.0, m2=1.0, f_triple=0.1, f_close=1.0):
'''
centroid pdf at source location
f_triple = 0.1 (moe + di stefano 2017) - valid for 0.8-1.2 Msun!
could improve to be average over all types?
f_close = 1.0 implies all triples have a close binary. May be over-generous
eclipse prob
'''
return centroid_val * f_triple * f_close * eclipse_probability(r1, r2, P, m1, m2)
def planet_prior(centroid_val, P, r1=1.0, rp=1.0, m1=1.0, mp=0.0, f_planet=0.2957):
'''
centroid pdf at source location
planet occurrence (fressin, any planet<29d)
eclipse prob
works for defined source planets too, just use appropriate centroid pdf value.
possibly needs a more general f_planet - as classifier will be using a range of planets.
should prior then be the prior of being in the whole training set, rather than the specific depth seen?
if so, need to change to 'fraction of ALL stars with planets' (i.e. including EBs etc).
Also look into default radii and masses. Precalculate mean eclipse probability for training set?
'''
return centroid_val * f_planet * eclipse_probability(r1, rp*REARTH/RSUN, P, m1, mp)
def fp_fressin(rp,dr=None):
if dr is None:
dr = rp*0.3
fp = quad(fressin_occurrence,rp-dr,rp+dr)[0]
return max(fp, 0.001) #to avoid zero
def fressin_occurrence(rp):
"""
Occurrence rates per bin from Fressin+ (2013)
"""
rp = np.atleast_1d(rp)
sq2 = np.sqrt(2)
bins = np.array([1/sq2,1,sq2,2,2*sq2,
4,4*sq2,8,8*sq2,
16,16*sq2])
rates = np.array([0,0.155,0.155,0.165,0.17,0.065,0.02,0.01,0.012,0.01,0.002,0])
return rates[np.digitize(rp,bins)]
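# Illustrative example (not part of the original module) of how the pieces above
# combine: the planet prior for a 2 R_Earth candidate on a 10-day period would
# look roughly like
#
#   f_p = fp_fressin(2.0)   # integrated Fressin+ (2013) occurrence rate
#   prior = planet_prior(centroid_val, 10.0, r1=1.0, rp=2.0, m1=1.0, f_planet=f_p)
#
# where centroid_val is the centroid PDF evaluated at the target position,
# e.g. from centroid_PDF_source().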
def trilegal_density(ra,dec,kind='target',maglim=21.75,area=1.0,mapfile=None):
if kind=='interp' and mapfile is None:
print('HEALPIX map file must be passed')
return 0
if kind not in ['target','interp']:
print('kind not recognised. Setting kind=target')
kind = 'target'
if kind=='target':
basefilename = 'trilegal_'+str(ra)+'_'+str(dec)
h5filename = basefilename + '.h5'
if not os.path.exists(h5filename):
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
else:
print('Using cached trilegal file. Sky area may be different.')
if os.path.exists(h5filename):
stars = pd.read_hdf(h5filename,'df')
with pd.HDFStore(h5filename) as store:
trilegal_args = store.get_storer('df').attrs.trilegal_args
if trilegal_args['maglim'] < maglim:
print('Re-calling trilegal with extended magnitude range')
get_trilegal(basefilename,ra,dec,maglim=maglim,area=area)
stars = pd.read_hdf(h5filename,'df')
stars = stars[stars['TESS_mag'] < maglim] #in case reading from file
#c = SkyCoord(trilegal_args['l'],trilegal_args['b'],
# unit='deg',frame='galactic')
#self.coords = c.icrs
area = trilegal_args['area']*(u.deg)**2
density = len(stars)/area
return density.value
else:
return 0
else:
import healpy as hp
#interpolate pre-calculated densities
coord = SkyCoord(ra,dec,unit='deg')
if np.abs(coord.galactic.b.value)<5:
print('Near galactic plane, Trilegal density may be inaccurate.')
#Density map will set mag limits
densitymap = hp.read_map(mapfile)
density = hp.get_interp_val(densitymap,ra,dec,lonlat=True)
return density
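# Illustrative usage (the coordinates and map filename below are made up):
#
#   rho = trilegal_density(56.3, -64.8, kind='target', maglim=21.0)   # web call, cached to .h5
#   rho = trilegal_density(56.3, -64.8, kind='interp', mapfile='density_healpix.fits')
#
# Both return an estimated stellar density per square degree.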
#maglim of 21 used following sullivan 2015
def get_trilegal(filename,ra,dec,folder='.', galactic=False,
filterset='TESS_2mass_kepler',area=1,maglim=21,binaries=False,
trilegal_version='1.6',sigma_AV=0.1,convert_h5=True):
"""Runs get_trilegal perl script; optionally saves output into .h5 file
Depends on a perl script provided by L. Girardi; calls the
web form simulation, downloads the file, and (optionally) converts
to HDF format.
Uses A_V at infinity from :func:`utils.get_AV_infinity`.
.. note::
Would be desirable to re-write the get_trilegal script
all in python.
:param filename:
Desired output filename. If extension not provided, it will
be added.
:param ra,dec:
Coordinates (ecliptic) for line-of-sight simulation.
:param folder: (optional)
Folder to which to save file. *Acknowledged, file control
in this function is a bit wonky.*
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param area: (optional)
Area of TRILEGAL simulation [sq. deg]
:param maglim: (optional)
Limiting magnitude in first mag (by default will be Kepler band)
If you want to limit in a different band, then you have to
go directly to the ``get_trilegal`` perl script.
:param binaries: (optional)
Whether to have TRILEGAL include binary stars. Default ``False``.
:param trilegal_version: (optional)
Default ``'1.6'``.
:param sigma_AV: (optional)
Fractional spread in A_V along the line of sight.
:param convert_h5: (optional)
If true, text file downloaded from TRILEGAL will be converted
into a ``pandas.DataFrame`` stored in an HDF file, with ``'df'``
path.
"""
if galactic:
l, b = ra, dec
else:
try:
c = SkyCoord(ra,dec)
except:
c = SkyCoord(ra,dec,unit='deg')
l,b = (c.galactic.l.value,c.galactic.b.value)
if os.path.isabs(filename):
folder = ''
if not re.search('\.dat$',filename):
outfile = '{}/{}.dat'.format(folder,filename)
else:
outfile = '{}/{}'.format(folder,filename)
NONMAG_COLS = ['Gc','logAge', '[M/H]', 'm_ini', 'logL', 'logTe', 'logg',
'm-M0', 'Av', 'm2/m1', 'mbol', 'Mact'] #all the rest are mags
AV = get_AV_infinity(l,b,frame='galactic')
print(AV)
if AV is not None:
if AV<=1.5:
trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,outfile)
#cmd = './get_trilegal %s %f %f %f %i %.3f %.2f %s 1 %.1f %s' % (trilegal_version,l,b,
# area,binaries,AV,sigma_AV,
# filterset,maglim,outfile)
#sp.Popen(cmd,shell=True).wait()
if convert_h5:
df = pd.read_table(outfile, sep='\s+', skipfooter=1, engine='python')
df = df.rename(columns={'#Gc':'Gc'})
for col in df.columns:
if col not in NONMAG_COLS:
df.rename(columns={col:'{}_mag'.format(col)},inplace=True)
if not re.search('\.h5$', filename):
h5file = '{}/{}.h5'.format(folder,filename)
else:
h5file = '{}/{}'.format(folder,filename)
df.to_hdf(h5file,'df')
with pd.HDFStore(h5file) as store:
attrs = store.get_storer('df').attrs
attrs.trilegal_args = {'version':trilegal_version,
'ra':ra, 'dec':dec,
'l':l,'b':b,'area':area,
'AV':AV, 'sigma_AV':sigma_AV,
'filterset':filterset,
'maglim':maglim,
'binaries':binaries}
os.remove(outfile)
else:
print('Skipping, AV > 1.5 or not found')
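# Sketch of a typical call (illustrative; the coordinates and filename are made up):
#
#   get_trilegal('trilegal_56.3_-64.8', 56.3, -64.8, maglim=21.0, area=1.0)
#
# downloads a TRILEGAL simulation for that line of sight and caches it as an
# HDF5 file, which trilegal_density() then reads back with pandas.read_hdf.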
def trilegal_webcall(trilegal_version,l,b,area,binaries,AV,sigma_AV,filterset,maglim,
outfile):
"""Calls TRILEGAL webserver and downloads results file.
:param trilegal_version:
Version of trilegal (only tested on 1.6).
:param l,b:
Coordinates (galactic) for line-of-sight simulation.
:param area:
Area of TRILEGAL simulation [sq. deg]
:param binaries:
Whether to have TRILEGAL include binary stars. Default ``False``.
:param AV:
Extinction along the line of sight.
:param sigma_AV:
Fractional spread in A_V along the line of sight.
:param filterset: (optional)
Filter set for which to call TRILEGAL.
:param maglim:
Limiting magnitude in mag (by default will be 1st band of filterset)
If you want to limit in a different band, then you have to
change function directly.
:param outfile:
Desired output filename.
"""
webserver = 'http://stev.oapd.inaf.it'
args = [l,b,area,AV,sigma_AV,filterset,maglim,1,binaries]
mainparams = ('imf_file=tab_imf%2Fimf_chabrier_lognormal.dat&binary_frac=0.3&'
'binary_mrinf=0.7&binary_mrsup=1&extinction_h_r=100000&extinction_h_z='
'110&extinction_kind=2&extinction_rho_sun=0.00015&extinction_infty={}&'
'extinction_sigma={}&r_sun=8700&z_sun=24.2&thindisk_h_r=2800&'
'thindisk_r_min=0&thindisk_r_max=15000&thindisk_kind=3&thindisk_h_z0='
'95&thindisk_hz_tau0=4400000000&thindisk_hz_alpha=1.6666&'
'thindisk_rho_sun=59&thindisk_file=tab_sfr%2Ffile_sfr_thindisk_mod.dat&'
'thindisk_a=0.8&thindisk_b=0&thickdisk_kind=0&thickdisk_h_r=2800&'
'thickdisk_r_min=0&thickdisk_r_max=15000&thickdisk_h_z=800&'
'thickdisk_rho_sun=0.0015&thickdisk_file=tab_sfr%2Ffile_sfr_thickdisk.dat&'
'thickdisk_a=1&thickdisk_b=0&halo_kind=2&halo_r_eff=2800&halo_q=0.65&'
'halo_rho_sun=0.00015&halo_file=tab_sfr%2Ffile_sfr_halo.dat&halo_a=1&'
'halo_b=0&bulge_kind=2&bulge_am=2500&bulge_a0=95&bulge_eta=0.68&'
'bulge_csi=0.31&bulge_phi0=15&bulge_rho_central=406.0&'
'bulge_cutoffmass=0.01&bulge_file=tab_sfr%2Ffile_sfr_bulge_zoccali_p03.dat&'
'bulge_a=1&bulge_b=-2.0e9&object_kind=0&object_mass=1280&object_dist=1658&'
'object_av=1.504&object_avkind=1&object_cutoffmass=0.8&'
'object_file=tab_sfr%2Ffile_sfr_m4.dat&object_a=1&object_b=0&'
'output_kind=1').format(AV,sigma_AV)
cmdargs = [trilegal_version,l,b,area,filterset,1,maglim,binaries,mainparams,
webserver,trilegal_version]
cmd = ("wget -o lixo -Otmpfile --post-data='submit_form=Submit&trilegal_version={}"
"&gal_coord=1&gc_l={}&gc_b={}&eq_alpha=0&eq_delta=0&field={}&photsys_file="
"tab_mag_odfnew%2Ftab_mag_{}.dat&icm_lim={}&mag_lim={}&mag_res=0.1&"
"binary_kind={}&{}' {}/cgi-bin/trilegal_{}").format(*cmdargs)
complete = False
while not complete:
notconnected = True
busy = True
print("TRILEGAL is being called with \n l={} deg, b={} deg, area={} sqrdeg\n "
"Av={} with {} fractional r.m.s. spread \n in the {} system, complete down to "
"mag={} in its {}th filter, use_binaries set to {}.".format(*args))
sp.Popen(cmd,shell=True).wait()
if os.path.exists('tmpfile') and os.path.getsize('tmpfile')>0:
notconnected = False
else:
print("No communication with {}, will retry in 2 min".format(webserver))
time.sleep(120)
if not notconnected:
with open('tmpfile','r') as f:
lines = f.readlines()
for line in lines:
if 'The results will be available after about 2 minutes' in line:
busy = False
break
sp.Popen('rm -f lixo tmpfile',shell=True)
if not busy:
filenameidx = line.find('<a href=../tmp/') +15
fileendidx = line[filenameidx:].find('.dat')
filename = line[filenameidx:filenameidx+fileendidx+4]
print("retrieving data from {} ...".format(filename))
while not complete:
time.sleep(120)
modcmd = 'wget -o lixo -O{} {}/tmp/{}'.format(filename,webserver,filename)
modcall = sp.Popen(modcmd,shell=True).wait()
if os.path.getsize(filename)>0:
with open(filename,'r') as f:
lastline = f.readlines()[-1]
if 'normally' in lastline:
complete = True
print('model downloaded!..')
if not complete:
print('still running...')
else:
print('Server busy, trying again in 2 minutes')
time.sleep(120)
sp.Popen('mv {} {}'.format(filename,outfile),shell=True).wait()
print('results copied to {}'.format(outfile))
def get_AV_infinity(ra,dec,frame='icrs'):
"""
Gets the A_V extinction at infinity for a given line of sight.
Queries the NED database using ``curl``.
.. note::
It would be desirable to rewrite this to avoid dependence
on ``curl``.
:param ra,dec:
Desired coordinates, in degrees.
:param frame: (optional)
Frame of input coordinates (e.g., ``'icrs', 'galactic'``)
"""
coords = SkyCoord(ra,dec,unit='deg',frame=frame).transform_to('icrs')
rah,ram,ras = coords.ra.hms
decd,decm,decs = coords.dec.dms
if decd > 0:
decsign = '%2B'
else:
decsign = '%2D'
url = 'http://ned.ipac.caltech.edu/cgi-bin/nph-calc?in_csys=Equatorial&in_equinox=J2000.0&obs_epoch=2010&lon='+'%i' % rah + \
'%3A'+'%i' % ram + '%3A' + '%05.2f' % ras + '&lat=%s' % decsign + '%i' % abs(decd) + '%3A' + '%i' % abs(decm) + '%3A' + '%05.2f' % abs(decs) + \
'&pa=0.0&out_csys=Equatorial&out_equinox=J2000.0'
tmpfile = '/tmp/nedsearch%s%s.html' % (ra,dec)
cmd = 'curl -s \'%s\' -o %s' % (url,tmpfile)
sp.Popen(cmd,shell=True).wait()
AV = None
try:
with open(tmpfile, 'r') as f:
for line in f:
m = re.search('V \(0.54\)\s+(\S+)',line)
if m:
AV = float(m.group(1))
os.remove(tmpfile)<|fim▁hole|> except:
logging.warning('Error accessing NED, url={}'.format(url))
return AV<|fim▁end|> | |
<|file_name|>ClearLogAction.java<|end_file_name|><|fim▁begin|>package com.reason.ide.console;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.icons.AllIcons;
import com.intellij.openapi.actionSystem.AnActionEvent;
import com.intellij.openapi.actionSystem.CommonDataKeys;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.project.DumbAwareAction;
import org.jetbrains.annotations.NotNull;
class ClearLogAction extends DumbAwareAction {<|fim▁hole|>
ClearLogAction(ConsoleView console) {
super("Clear All", "Clear the contents of the logs", AllIcons.Actions.GC);
m_console = console;
}
@Override
public void update(@NotNull AnActionEvent e) {
Editor editor = e.getData(CommonDataKeys.EDITOR);
e.getPresentation().setEnabled(editor != null && editor.getDocument().getTextLength() > 0);
}
@Override
public void actionPerformed(@NotNull AnActionEvent e) {
m_console.clear();
}
}<|fim▁end|> | private final ConsoleView m_console; |
<|file_name|>ch4_ex4.1.4.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Created by Samvel Khalatyan on Mar 23, 2014<|fim▁hole|># Copyright (c) 2014 Samvel Khalatyan. All rights reserved
#
# Use of this source code is governed by a license that can be found in
# the LICENSE file.
import random
import unittest
from lib import unigraph
class UnigraphExtra(unigraph.Unigraph):
def has_edge(self, left_vertex, right_vertex):
if left_vertex == right_vertex:
return True
else:
return right_vertex in self._vertices[left_vertex]
class UnigraphEdgeTestCase(unittest.TestCase):
def setUp(self):
self.graph = UnigraphExtra(random.randrange(10, 15))
for edge in range(2 * self.graph.vertices()):
f, t = (random.randrange(self.graph.vertices()) for x in range(2))
self.graph.add_edge(f, t)
def test_edge(self):
for vertex in range(self.graph.vertices()):
existing_vertices = set(self.graph._vertices[vertex])
all_vertices = set(range(self.graph.vertices()))
missing_vertices = all_vertices - existing_vertices
for adj_vertex in existing_vertices:
self.assertTrue(self.graph.has_edge(vertex, adj_vertex))
for adj_vertex in missing_vertices:
self.assertFalse(self.graph.has_edge(vertex, adj_vertex))
def test_self_loop(self):
for vertex in range(self.graph.vertices()):
self.assertTrue(self.graph.has_edge(vertex, vertex))
if "__main__" == __name__:
unittest.main()<|fim▁end|> | |
<|file_name|>Element.ts<|end_file_name|><|fim▁begin|>/// <reference path="./../Element.ts" />
/// <reference path="./../../../elements/bdo/IElement.ts" />
/// <reference path="./../../../elements/bdo/IOptions.ts" />
module xlib.ui.element.adapters.browser.bdo {
import IEvent = elements.IEvent;
export class Element extends browser.Element implements element.elements.bdo.IElement<HTMLElement> {
constructor(options?: element.elements.bdo.IOptions<HTMLElement>) {
super(options);
if (options && xlib.typeOf(options.title) !== "undefined") {
this.setTitle(options.title);
}
if (options && xlib.typeOf(options.lang) !== "undefined") {
this.setLang(options.lang);
}
if (options && xlib.typeOf(options.xmlLang) !== "undefined") {
this.setXmlLang(options.xmlLang);
}
if (options && xlib.typeOf(options.dir) !== "undefined") {
this.setDir(options.dir);
}
}<|fim▁hole|> return super.attributesDeny().concat(["title", "lang", "xml:lang", "dir"]);
}
public attributesAllow(): string[] {
return super.attributesAllow().concat(["title", "lang", "xml:lang", "dir"]);
}
public allowChildren(): boolean {
return true;
}
public allowText(): boolean {
return true;
}
public allowHtml(): boolean {
return true;
}
public allowTags(): string[] {
return ["a", "abbr", "acronym", "b", "bdo", "big", "br", "button", "cite", "code", "del", "dfn", "em", "i", "img", "input", "ins", "kbd", "label", "map", "object", "q", "samp", "script", "select", "small", "span", "strong", "sub", "sup", "textarea", "tt", "var"];
}
public getTag(): string {
return "bdo";
}
public getTitle(): string {
var element: HTMLElement = this.element();
return String(element.getAttribute("title") || "") || null;
}
public setTitle(value: string): void {
var temp: string = String(value || ""),
element: HTMLElement = this.element();
if (temp === "") {
element.removeAttribute("title");
} else {
element.setAttribute("title", temp);
}
}
public getLang(): string {
var element: HTMLElement = this.element();
return String(element.getAttribute("lang") || "") || null;
}
public setLang(value: string): void {
var temp: string = String(value || ""),
element: HTMLElement = this.element();
if (temp === "") {
element.removeAttribute("lang");
} else {
element.setAttribute("lang", temp);
}
}
public getXmlLang(): string {
var element: HTMLElement = this.element();
// todo: use element.getAttributeNs() !!!
return String(element.getAttribute("xml:lang") || "") || null;
}
public setXmlLang(value: string): void {
var temp: string = String(value || ""),
element: HTMLElement = this.element();
if (temp === "") {
// todo: use element.removeAttributeNS() !!!
element.removeAttribute("xml:lang");
} else {
// todo: use element.setAttributeNS() !!!
element.setAttribute("xml:lang", temp);
}
}
public getDir(): string {
var element: HTMLElement = this.element();
return String(element.getAttribute("dir") || "") || null;
}
public setDir(value: string): void {
var temp: string = String(value || "").toLowerCase(),
element: HTMLElement = this.element();
if (temp === "") {
element.removeAttribute("dir");
} else {
if (["ltr", "rtl"].indexOf(temp) === -1) {
throw new Error("bla bla bla");
}
element.setAttribute("dir", temp);
}
}
public onClick(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("click", listener);
}
public onDblClick(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("dblclick", listener);
}
public onMouseDown(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("mousedown", listener);
}
public onMouseUp(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("mouseup", listener);
}
public onMouseOver(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("mouseover", listener);
}
public onMouseMove(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("mousemove", listener);
}
public onMouseOut(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("mouseout", listener);
}
public onKeyPress(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("keypress", listener);
}
public onKeyDown(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("keydown", listener);
}
public onKeyUp(listener: (event?: IEvent<HTMLElement>) => void): void {
this.on("keyup", listener);
}
}
}<|fim▁end|> |
public attributesDeny(): string[] {
// todo: adjust xml:lang |
<|file_name|>windowing.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Abstract windowing methods. The concrete implementations of these can be found in `platform/`.
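//!
//! Illustrative sketch (not part of this module) of how a backend is expected
//! to drive Servo with these types; `window` and `compositor` are assumed to
//! exist elsewhere:
//!
//! ```ignore
//! loop {
//!     match window.recv() {
//!         QuitWindowEvent => break,
//!         ResizeWindowEvent(size) => compositor.on_resize(size),
//!         event => compositor.handle_event(event),
//!     }
//! }
//! ```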
use geom::point::TypedPoint2D;
use geom::scale_factor::ScaleFactor;
use geom::size::TypedSize2D;
use layers::geometry::DevicePixel;
use servo_msg::compositor_msg::{ReadyState, RenderState};
use servo_util::geometry::ScreenPx;
pub enum MouseWindowEvent {
MouseWindowClickEvent(uint, TypedPoint2D<DevicePixel, f32>),
MouseWindowMouseDownEvent(uint, TypedPoint2D<DevicePixel, f32>),
MouseWindowMouseUpEvent(uint, TypedPoint2D<DevicePixel, f32>),
}
pub enum WindowNavigateMsg {
Forward,
Back,
}
/// Events that the windowing system sends to Servo.
pub enum WindowEvent {
/// Sent when no message has arrived.
///
/// FIXME: This is a bogus event and is only used because we don't have the new
/// scheduler integrated with the platform event loop.
IdleWindowEvent,
/// Sent when part of the window is marked dirty and needs to be redrawn.
RefreshWindowEvent,
/// Sent when the window is resized.
ResizeWindowEvent(TypedSize2D<DevicePixel, uint>),
/// Sent when a new URL is to be loaded.
LoadUrlWindowEvent(String),
/// Sent when a mouse hit test is to be performed.
MouseWindowEventClass(MouseWindowEvent),
/// Sent when the mouse moves.
MouseWindowMoveEventClass(TypedPoint2D<DevicePixel, f32>),
/// Sent when the user scrolls. Includes the current cursor position.
ScrollWindowEvent(TypedPoint2D<DevicePixel, f32>, TypedPoint2D<DevicePixel, i32>),
/// Sent when the user zooms.
ZoomWindowEvent(f32),
/// Simulated "pinch zoom" gesture for non-touch platforms (e.g. ctrl-scrollwheel).
PinchZoomWindowEvent(f32),
/// Sent when the user uses chrome navigation (i.e. backspace or shift-backspace).
NavigationWindowEvent(WindowNavigateMsg),
/// Sent when rendering is finished.
FinishedWindowEvent,
/// Sent when the user quits the application
QuitWindowEvent,
}
pub trait WindowMethods {
/// Returns the size of the window in hardware pixels.
fn framebuffer_size(&self) -> TypedSize2D<DevicePixel, uint>;
/// Returns the size of the window in density-independent "px" units.
fn size(&self) -> TypedSize2D<ScreenPx, f32>;
/// Presents the window to the screen (perhaps by page flipping).
fn present(&self);
<|fim▁hole|> /// Sets the ready state of the current page.
fn set_ready_state(&self, ready_state: ReadyState);
/// Sets the render state of the current page.
fn set_render_state(&self, render_state: RenderState);
/// Returns the hidpi factor of the monitor.
fn hidpi_factor(&self) -> ScaleFactor<ScreenPx, DevicePixel, f32>;
}<|fim▁end|> | /// Spins the event loop and returns the next event.
fn recv(&self) -> WindowEvent;
|
<|file_name|>app.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from flask import request
class View(object):
def __init__(self, core):
self.core = core<|fim▁hole|> def __call__(self, *args, **kwargs):
method = request.method.lower()
handler = getattr(self, method, None)
if callable(handler):
return handler(request=request, *args, **kwargs)
else:
return "Bad Request", 403
class Application(object):
def __init__(self, core):
self.core = core
def get_urls(self):
"""
Returns a list of tuples: (route, View)
"""
return []
def get_plugins(self):
"""
Returns a list of plugin classes
"""
return []<|fim▁end|> | |
<|file_name|>ElementShapeableTest.java<|end_file_name|><|fim▁begin|>package com.github.bachelorpraktikum.visualisierbar.model;
import com.github.bachelorpraktikum.visualisierbar.model.Element.State;
import com.github.bachelorpraktikum.visualisierbar.model.Element.Type;
public class ElementShapeableTest extends ShapeableImplementationTest {
<|fim▁hole|> @Override
protected Shapeable<?> getShapeable() {
Context context = new Context();
Node node = Node.in(context).create("node", new Coordinates(0, 0));
return Element.in(context).create("element", Type.HauptSignal, node, State.FAHRT);
}
}<|fim▁end|> | |
<|file_name|>hello_2_2.js<|end_file_name|><|fim▁begin|>console.log('argv[0]: '+process.argv[0]);
<|fim▁hole|><|fim▁end|> | console.log('argv[1]: '+process.argv[1]); |
<|file_name|>index.ts<|end_file_name|><|fim▁begin|>export * from './map-config.service';<|fim▁hole|>export * from './map-loader.service';<|fim▁end|> | export * from './esri-module-provider'; |
<|file_name|>knock-rgb-led-smd.py<|end_file_name|><|fim▁begin|>import RPi.GPIO as GPIO
KnockPin = 11
LedPin = 12
Led_status = 1
def setup():
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
GPIO.setup(LedPin, GPIO.OUT) # Set LedPin's mode to output
GPIO.setup(KnockPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.output(LedPin, GPIO.HIGH) # Set LedPin high (+3.3V) to turn the LED off (active-low wiring)
def swLed(ev=None):
global Led_status
Led_status = not Led_status
GPIO.output(LedPin, Led_status) # switch led status(on-->off; off-->on)
print "LED: " + ("on" if Led_status else "off")
def loop():
GPIO.add_event_detect(KnockPin, GPIO.FALLING, callback=swLed, bouncetime=200) # wait for falling
while True:
pass # Don't do anything
def destroy():
GPIO.output(LedPin, GPIO.LOW) # led off
GPIO.cleanup() # Release resource<|fim▁hole|> setup()
try:
loop()
except KeyboardInterrupt: # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
destroy()<|fim▁end|> |
if __name__ == '__main__': # Program start from here |
<|file_name|>fibonacci.js<|end_file_name|><|fim▁begin|><|fim▁hole|>/**
* Fibonacci sequence
*/
export default function fibonacci(n){
if(n <= 2){
return 1;
}
let n1 = 1, n2 = 1, sn = 0;
for(let i = 0; i < n - 2; i ++){
sn = n1 + n2;
n1 = n2;
n2 = sn;
}
return sn;
}<|fim▁end|> | |
<|file_name|>Concentration.js<|end_file_name|><|fim▁begin|>/*
* Version 0.1.14
* Made By Robin Kuiper
* Skype: RobinKuiper.eu
* Discord: Atheos#1095
* My Discord Server: https://discord.gg/AcC9VME
* Roll20: https://app.roll20.net/users/1226016/robin
* Roll20 Wiki: https://wiki.roll20.net/Script:Concentration
* Roll20 Thread: https://app.roll20.net/forum/post/6364317/script-concentration/?pageforid=6364317#post-6364317
* Github: https://github.com/RobinKuiper/Roll20APIScripts
* Reddit: https://www.reddit.com/user/robinkuiper/
* Patreon: https://patreon.com/robinkuiper
* Paypal.me: https://www.paypal.me/robinkuiper
*/
var Concentration = Concentration || (function() {
'use strict';
let checked = [];
// Styling for the chat responses.
const styles = {
reset: 'padding: 0; margin: 0;',
menu: 'background-color: #fff; border: 1px solid #000; padding: 5px; border-radius: 5px;',
button: 'background-color: #000; border: 1px solid #292929; border-radius: 3px; padding: 5px; color: #fff; text-align: center;',
textButton: 'background-color: transparent; border: none; padding: 0; color: #000; text-decoration: underline',
list: 'list-style: none;',
float: {
right: 'float: right;',
left: 'float: left;'
},
overflow: 'overflow: hidden;',
fullWidth: 'width: 100%;'
},
script_name = 'Concentration',
state_name = 'CONCENTRATION',
markers = ['blue', 'brown', 'green', 'pink', 'purple', 'red', 'yellow', '-', 'all-for-one', 'angel-outfit', 'archery-target', 'arrowed', 'aura', 'back-pain', 'black-flag', 'bleeding-eye', 'bolt-shield', 'broken-heart', 'broken-shield', 'broken-skull', 'chained-heart', 'chemical-bolt', 'cobweb', 'dead', 'death-zone', 'drink-me', 'edge-crack', 'fishing-net', 'fist', 'fluffy-wing', 'flying-flag', 'frozen-orb', 'grab', 'grenade', 'half-haze', 'half-heart', 'interdiction', 'lightning-helix', 'ninja-mask', 'overdrive', 'padlock', 'pummeled', 'radioactive', 'rolling-tomb', 'screaming', 'sentry-gun', 'skull', 'sleepy', 'snail', 'spanner', 'stopwatch','strong', 'three-leaves', 'tread', 'trophy', 'white-tower'],
handleInput = (msg) => {
if(state[state_name].config.auto_add_concentration_marker && msg && msg.rolltemplate && msg.rolltemplate === 'spell' && (msg.content.includes("{{concentration=1}}"))){
handleConcentrationSpellCast(msg);
}
if (msg.type != 'api') return;
// Split the message into command and argument(s)
let args = msg.content.split(' ');
let command = args.shift().substring(1);
let extracommand = args.shift();
let message;
if (command == state[state_name].config.command) {
if(playerIsGM(msg.playerid)){
switch(extracommand){
case 'reset':
state[state_name] = {};
setDefaults(true);
sendConfigMenu(false, '<span style="color: red">The API Library needs to be restarted for this to take effect.</span>');
break;
case 'config':
if(args.length > 0){
let setting = args.shift().split('|');
let key = setting.shift();
let value = (setting[0] === 'true') ? true : (setting[0] === 'false') ? false : setting[0];
state[state_name].config[key] = value;
if(key === 'bar'){
//registerEventHandlers();
message = '<span style="color: red">The API Library needs to be restarted for this to take effect.</span>';
}
}
sendConfigMenu(false, message);
break;
case 'advantage-menu':
sendAdvantageMenu();
break;
case 'toggle-advantage':
let id = args[0];
if(state[state_name].advantages[id]){
state[state_name].advantages[id] = !state[state_name].advantages[id];
}else{
state[state_name].advantages[id] = true;
}
sendAdvantageMenu();
break;
case 'roll':
let represents = args[0],
DC = parseInt(args[1], 10),
con_save_mod = parseInt(args[2], 10),
name = args[3],
target = args[4];
roll(represents, DC, con_save_mod, name, target, false);
break;
case 'advantage':
let represents_a = args[0],
DC_a = parseInt(args[1], 10),
con_save_mod_a = parseInt(args[2], 10),
name_a = args[3],
target_a = args[4];
roll(represents_a, DC_a, con_save_mod_a, name_a, target_a, true);
break;
default:
if(msg.selected && msg.selected.length){
msg.selected.forEach(s => {
let token = getObj(s._type, s._id);
addConcentration(token, msg.playerid, extracommand);
});
return;
}
sendConfigMenu();
break;
}
}else{
if(msg.selected && msg.selected.length){
msg.selected.forEach(s => {
let token = getObj(s._type, s._id);
addConcentration(token, msg.playerid, extracommand);
});
}
}
}
},
addConcentration = (token, playerid, spell) => {
const marker = state[state_name].config.statusmarker
let character = getObj('character', token.get('represents'));
if((token.get('controlledby').split(',').includes(playerid) || token.get('controlledby').split(',').includes('all')) ||
(character && (character.get('controlledby').split(',').includes(playerid) || character.get('controlledby').split(',').includes('all'))) ||
playerIsGM(playerid)){
if(!token.get('status_'+marker)){
let target = state[state_name].config.send_reminder_to;
if(target === 'character'){
target = createWhisperName(token.get('name'));
}else if(target === 'everyone'){
target = ''
}
let message;
if(spell){
message = '<b>'+token.get('name')+'</b> is now concentrating on <b>'+spell+'</b>.';
}else{
message = '<b>'+token.get('name')+'</b> is now concentrating.';
}
makeAndSendMenu(message, '', target);
}
token.set('status_'+marker, !token.get('status_'+marker));
}
},<|fim▁hole|> handleConcentrationSpellCast = (msg) => {
const marker = state[state_name].config.statusmarker
let character_name = msg.content.match(/charname=([^\n{}]*[^"\n{}])/);
character_name = RegExp.$1;
let spell_name = msg.content.match(/name=([^\n{}]*[^"\n{}])/);
spell_name = RegExp.$1;
let player = getObj('player', msg.playerid),
characterid = findObjs({ name: character_name, _type: 'character' }).shift().get('id'),
represented_tokens = findObjs({ represents: characterid, _type: 'graphic' }),
message,
target = state[state_name].config.send_reminder_to;
if(!character_name || !spell_name || !player || !characterid) return;
let search_attributes = {
represents: characterid,
_type: 'graphic',
_pageid: player.get('lastpage')
}
search_attributes['status_'+marker] = true;
let is_concentrating = (findObjs(search_attributes).length > 0);
if(is_concentrating){
message = '<b>'+character_name+'</b> is concentrating already.';
}else{
represented_tokens.forEach(token => {
let attributes = {};
attributes['status_'+marker] = true;
token.set(attributes);
message = '<b>'+character_name+'</b> is now concentrating on <b>'+spell_name+'</b>.';
});
}
if(target === 'character'){
target = createWhisperName(character_name);
}else if(target === 'everyone'){
target = ''
}
makeAndSendMenu(message, '', target);
},
handleStatusMarkerChange = (obj, prev) => {
const marker = state[state_name].config.statusmarker
if(!obj.get('status_'+marker)){
removeMarker(obj.get('represents'));
}
},
handleGraphicChange = (obj, prev) => {
if(checked.includes(obj.get('represents'))){ return false; }
let bar = 'bar'+state[state_name].config.bar+'_value',
target = state[state_name].config.send_reminder_to,
marker = state[state_name].config.statusmarker;
if(prev && obj.get('status_'+marker) && obj.get(bar) < prev[bar]){
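// 5e concentration rule as implemented below: the save DC is half the damage taken, with a minimum of 10.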
let calc_DC = Math.floor((prev[bar] - obj.get(bar))/2),
DC = (calc_DC > 10) ? calc_DC : 10,
con_save_mod = parseInt(getAttrByName(obj.get('represents'), state[state_name].config.bonus_attribute, 'current')) || 0,
chat_text;
if(target === 'character'){
chat_text = "Make a Concentration Check - <b>DC " + DC + "</b>.";
target = createWhisperName(obj.get('name'));
}else if(target === 'everyone'){
chat_text = '<b>'+obj.get('name')+'</b> must make a Concentration Check - <b>DC ' + DC + '</b>.';
target = '';
}else{
chat_text = '<b>'+obj.get('name')+'</b> must make a Concentration Check - <b>DC ' + DC + '</b>.';
target = 'gm';
}
if(state[state_name].config.show_roll_button){
chat_text += '<hr>' + makeButton('Advantage', '!' + state[state_name].config.command + ' advantage ' + obj.get('represents') + ' ' + DC + ' ' + con_save_mod + ' ' + obj.get('name') + ' ' + target, styles.button + styles.float.right);
chat_text += ' ' + makeButton('Roll', '!' + state[state_name].config.command + ' roll ' + obj.get('represents') + ' ' + DC + ' ' + con_save_mod + ' ' + obj.get('name') + ' ' + target, styles.button + styles.float.left);
}
if(state[state_name].config.auto_roll_save){
//&{template:default} {{name='+obj.get('name')+' - Concentration Save}} {{Modifier='+con_save_mod+'}} {{Roll=[[1d20cf<'+(DC-con_save_mod-1)+'cs>'+(DC-con_save_mod-1)+'+'+con_save_mod+']]}} {{DC='+DC+'}}
roll(obj.get('represents'), DC, con_save_mod, obj.get('name'), target, state[state_name].advantages[obj.get('represents')]);
}else{
makeAndSendMenu(chat_text, '', target);
}
let length = checked.push(obj.get('represents'));
setTimeout(() => {
checked.splice(length-1, 1);
}, 1000);
}
},
roll = (represents, DC, con_save_mod, name, target, advantage) => {
sendChat(script_name, '[[1d20cf<'+(DC-con_save_mod-1)+'cs>'+(DC-con_save_mod-1)+'+'+con_save_mod+']]', results => {
let title = 'Concentration Save <br> <b style="font-size: 10pt; color: gray;">'+name+'</b>',
advantageRollResult;
let rollresult = results[0].inlinerolls[0].results.rolls[0].results[0].v;
let result = rollresult;
if(advantage){
advantageRollResult = randomInteger(20);
result = (rollresult <= advantageRollResult) ? advantageRollResult : rollresult;
}
let total = result + con_save_mod;
let success = total >= DC;
let result_text = (success) ? 'Success' : 'Failed',
result_color = (success) ? 'green' : 'red';
let rollResultString = (advantage) ? rollresult + ' / ' + advantageRollResult : rollresult;
let contents = ' \
<table style="width: 100%; text-align: left;"> \
<tr> \
<th>DC</th> \
<td>'+DC+'</td> \
</tr> \
<tr> \
<th>Modifier</th> \
<td>'+con_save_mod+'</td> \
</tr> \
<tr> \
<th>Roll Result</th> \
<td>'+rollResultString+'</td> \
</tr> \
</table> \
<div style="text-align: center"> \
<b style="font-size: 16pt;"> \
<span style="border: 1px solid '+result_color+'; padding-bottom: 2px; padding-top: 4px;">[['+result+'+'+con_save_mod+']]</span><br><br> \
'+result_text+' \
</b> \
</div>'
makeAndSendMenu(contents, title, target);
if(target !== '' && target !== 'gm'){
makeAndSendMenu(contents, title, 'gm');
}
if(!success){
removeMarker(represents);
}
});
},
removeMarker = (represents, type='graphic') => {
findObjs({ type, represents }).forEach(o => {
o.set('status_'+state[state_name].config.statusmarker, false);
});
},
createWhisperName = (name) => {
return name.split(' ').shift();
},
ucFirst = (string) => {
return string.charAt(0).toUpperCase() + string.slice(1);
},
sendConfigMenu = (first, message) => {
let markerDropdown = '?{Marker';
markers.forEach((marker) => {
markerDropdown += '|'+ucFirst(marker).replace('-', ' ')+','+marker
})
markerDropdown += '}';
let markerButton = makeButton(state[state_name].config.statusmarker, '!' + state[state_name].config.command + ' config statusmarker|'+markerDropdown, styles.button + styles.float.right),
commandButton = makeButton('!'+state[state_name].config.command, '!' + state[state_name].config.command + ' config command|?{Command (without !)}', styles.button + styles.float.right),
barButton = makeButton('bar ' + state[state_name].config.bar, '!' + state[state_name].config.command + ' config bar|?{Bar|Bar 1 (green),1|Bar 2 (blue),2|Bar 3 (red),3}', styles.button + styles.float.right),
sendToButton = makeButton(state[state_name].config.send_reminder_to, '!' + state[state_name].config.command + ' config send_reminder_to|?{Send To|Everyone,everyone|Character,character|GM,gm}', styles.button + styles.float.right),
addConMarkerButton = makeButton(state[state_name].config.auto_add_concentration_marker, '!' + state[state_name].config.command + ' config auto_add_concentration_marker|'+!state[state_name].config.auto_add_concentration_marker, styles.button + styles.float.right),
autoRollButton = makeButton(state[state_name].config.auto_roll_save, '!' + state[state_name].config.command + ' config auto_roll_save|'+!state[state_name].config.auto_roll_save, styles.button + styles.float.right),
//advantageButton = makeButton(state[state_name].config.advantage, '!' + state[state_name].config.command + ' config advantage|'+!state[state_name].config.advantage, styles.button + styles.float.right),
bonusAttrButton = makeButton(state[state_name].config.bonus_attribute, '!' + state[state_name].config.command + ' config bonus_attribute|?{Attribute|'+state[state_name].config.bonus_attribute+'}', styles.button + styles.float.right),
showRollButtonButton = makeButton(state[state_name].config.show_roll_button, '!' + state[state_name].config.command + ' config show_roll_button|'+!state[state_name].config.show_roll_button, styles.button + styles.float.right),
listItems = [
'<span style="'+styles.float.left+'">Command:</span> ' + commandButton,
'<span style="'+styles.float.left+'">Statusmarker:</span> ' + markerButton,
'<span style="'+styles.float.left+'">HP Bar:</span> ' + barButton,
'<span style="'+styles.float.left+'">Send Reminder To:</span> ' + sendToButton,
'<span style="'+styles.float.left+'">Auto Add Con. Marker: <p style="font-size: 8pt;">Works only for 5e OGL Sheet.</p></span> ' + addConMarkerButton,
'<span style="'+styles.float.left+'">Auto Roll Save:</span> ' + autoRollButton,
],
resetButton = makeButton('Reset', '!' + state[state_name].config.command + ' reset', styles.button + styles.fullWidth),
title_text = (first) ? script_name + ' First Time Setup' : script_name + ' Config';
/*if(state[state_name].config.auto_roll_save){
listItems.push('<span style="'+styles.float.left+'">Advantage:</span> ' + advantageButton);
}*/
if(state[state_name].config.auto_roll_save){
listItems.push('<span style="'+styles.float.left+'">Bonus Attribute:</span> ' + bonusAttrButton)
}
if(!state[state_name].config.auto_roll_save){
listItems.push('<span style="'+styles.float.left+'">Roll Button:</span> ' + showRollButtonButton);
}
let advantageMenuButton = (state[state_name].config.auto_roll_save) ? makeButton('Advantage Menu', '!' + state[state_name].config.command + ' advantage-menu', styles.button + styles.fullWidth) : '';
message = (message) ? '<p>'+message+'</p>' : '';
let contents = message+makeList(listItems, styles.reset + styles.list + styles.overflow, styles.overflow)+'<br>'+advantageMenuButton+'<hr><p style="font-size: 80%">You can always come back to this config by typing `!'+state[state_name].config.command+' config`.</p><hr>'+resetButton;
makeAndSendMenu(contents, title_text, 'gm');
},
sendAdvantageMenu = () => {
let menu_text = "";
let characters = findObjs({ type: 'character' }).sort((a, b) => {
let nameA = a.get('name').toUpperCase();
let nameB = b.get('name').toUpperCase();
if(nameA < nameB) return -1;
if(nameA > nameB) return 1;
return 0;
});
characters.forEach(character => {
let name = (state[state_name].advantages && state[state_name].advantages[character.get('id')]) ? '<b>'+character.get('name')+'</b>' : character.get('name');
menu_text += makeButton(name, '!' + state[state_name].config.command + ' toggle-advantage ' + character.get('id'), styles.textButton) + '<br>';
});
makeAndSendMenu(menu_text, 'Advantage Menu', 'gm');
},
makeAndSendMenu = (contents, title, whisper, callback) => {
title = (title && title != '') ? makeTitle(title) : '';
whisper = (whisper && whisper !== '') ? '/w ' + whisper + ' ' : '';
sendChat(script_name, whisper + '<div style="'+styles.menu+styles.overflow+'">'+title+contents+'</div>', null, {noarchive:true});
},
makeTitle = (title) => {
return '<h3 style="margin-bottom: 10px;">'+title+'</h3>';
},
makeButton = (title, href, style) => {
return '<a style="'+style+'" href="'+href+'">'+title+'</a>';
},
makeList = (items, listStyle, itemStyle) => {
let list = '<ul style="'+listStyle+'">';
items.forEach((item) => {
list += '<li style="'+itemStyle+'">'+item+'</li>';
});
list += '</ul>';
return list;
},
pre_log = (message) => {
log('---------------------------------------------------------------------------------------------');
if(!message){ return; }
log(message);
log('---------------------------------------------------------------------------------------------');
},
checkInstall = () => {
if(!_.has(state, state_name)){
state[state_name] = state[state_name] || {};
}
setDefaults();
log(script_name + ' Ready! Command: !'+state[state_name].config.command);
if(state[state_name].config.debug){ makeAndSendMenu(script_name + ' Ready! Debug On.', '', 'gm') }
},
registerEventHandlers = () => {
on('chat:message', handleInput);
on('change:graphic:bar'+state[state_name].config.bar+'_value', handleGraphicChange);
on('change:graphic:statusmarkers', handleStatusMarkerChange);
},
setDefaults = (reset) => {
const defaults = {
config: {
command: 'concentration',
statusmarker: 'stopwatch',
bar: 1,
send_reminder_to: 'everyone', // character,gm,
auto_add_concentration_marker: true,
auto_roll_save: true,
advantage: false,
bonus_attribute: 'constitution_save_bonus',
show_roll_button: true
},
advantages: {}
};
if(!state[state_name].config){
state[state_name].config = defaults.config;
}else{
if(!state[state_name].config.hasOwnProperty('command')){
state[state_name].config.command = defaults.config.command;
}
if(!state[state_name].config.hasOwnProperty('statusmarker')){
state[state_name].config.statusmarker = defaults.config.statusmarker;
}
if(!state[state_name].config.hasOwnProperty('bar')){
state[state_name].config.bar = defaults.config.bar;
}
if(!state[state_name].config.hasOwnProperty('send_reminder_to')){
state[state_name].config.send_reminder_to = defaults.config.send_reminder_to;
}
if(!state[state_name].config.hasOwnProperty('auto_add_concentration_marker')){
state[state_name].config.auto_add_concentration_marker = defaults.config.auto_add_concentration_marker;
}
if(!state[state_name].config.hasOwnProperty('auto_roll_save')){
state[state_name].config.auto_roll_save = defaults.config.auto_roll_save;
}
if(!state[state_name].config.hasOwnProperty('advantage')){
state[state_name].config.advantage = defaults.config.advantage;
}
if(!state[state_name].config.hasOwnProperty('bonus_attribute')){
state[state_name].config.bonus_attribute = defaults.config.bonus_attribute;
}
if(!state[state_name].config.hasOwnProperty('show_roll_button')){
state[state_name].config.show_roll_button = defaults.config.show_roll_button;
}
}
if(!state[state_name].advantages){
state[state_name].advantages = defaults.advantages;
}
if(!state[state_name].config.hasOwnProperty('firsttime') && !reset){
sendConfigMenu(true);
state[state_name].config.firsttime = false;
}
};
return {
CheckInstall: checkInstall,
RegisterEventHandlers: registerEventHandlers
}
})();
on('ready',function() {
'use strict';
Concentration.CheckInstall();
Concentration.RegisterEventHandlers();
});<|fim▁end|> | |
<|file_name|>requirements.js<|end_file_name|><|fim▁begin|>'use strict';
/**
* The basic http module, used to create the server.
*
* @link http://nodejs.org/api/http.html
*/
alchemy.use('http', 'http');
/**
* This module contains utilities for handling and transforming file paths.
* Almost all these methods perform only string transformations.
* The file system is not consulted to check whether paths are valid.
*
* @link http://nodejs.org/api/path.html
*/
alchemy.use('path', 'path');
/**
* File I/O is provided by simple wrappers around standard POSIX functions.<|fim▁hole|> */
alchemy.use('graceful-fs', 'fs');
/**
* Useful utilities.
*
* @link http://nodejs.org/api/util.html
*/
alchemy.use('util', 'util');
/**
* The native mongodb library
*
* @link https://npmjs.org/package/mongodb
*/
alchemy.use('mongodb', 'mongodb');
/**
* The LESS interpreter.
*
* @link https://npmjs.org/package/less
*/
alchemy.use('less', 'less');
/**
* Hawkejs view engine
*
* @link https://npmjs.org/package/hawkejs
*/
alchemy.use('hawkejs', 'hawkejs');
alchemy.hawkejs = new Classes.Hawkejs.Hawkejs;
/**
* The function to detect when everything is too busy
*/
alchemy.toobusy = alchemy.use('toobusy-js', 'toobusy');
// If the config is a number, use that as the lag threshold
if (typeof alchemy.settings.toobusy === 'number') {
alchemy.toobusy.maxLag(alchemy.settings.toobusy);
}
/**
* Load Sputnik, the stage-based launcher
*/
alchemy.sputnik = new (alchemy.use('sputnik', 'sputnik'))();
/**
* Real-time apps made cross-browser & easy with a WebSocket-like API.
*
* @link https://npmjs.org/package/socket.io
*/
alchemy.use('socket.io', 'io');
/**
* Recursively mkdir, like `mkdir -p`.
* This is a requirement fetched from express
*
* @link https://npmjs.org/package/mkdirp
*/
alchemy.use('mkdirp', 'mkdirp');
/**
* Base useragent library
*
* @link https://npmjs.org/package/useragent
*/
alchemy.use('useragent');
/**
* Enable the `satisfies` method in the `useragent` library
*
* @link https://www.npmjs.com/package/useragent#adding-more-features-to-the-useragent
*/
require('useragent/features');<|fim▁end|> | *
* @link http://nodejs.org/api/fs.html |
<|file_name|>issue-65846-rollback-gating-failing-matcher.rs<|end_file_name|><|fim▁begin|>// run-pass
// Test that failing macro matchers will not cause pre-expansion errors
// even though they use a feature that is pre-expansion gated.
macro_rules! m {
($e:expr) => { 0 }; // This fails on the input below due to `, foo`.<|fim▁hole|> (box $e:expr, foo) => { 2 }; // Successful matcher, we should get `2`.
}
fn main() {
assert_eq!(2, m!(box 42, foo));
}<|fim▁end|> | ($e:expr,) => { 1 }; // This also fails to match due to `foo`. |
<|file_name|>playground-empty.rs<|end_file_name|><|fim▁begin|>#![crate_name = "foo"]
#![doc(html_playground_url = "")]
// compile-flags:-Z unstable-options --playground-url https://play.rust-lang.org/
<|fim▁hole|>//! ```
//! println!("Hello, world!");
//! ```
// @!has foo/index.html '//a[@class="test-arrow"]' "Run"<|fim▁end|> | //! module docs
//! |
<|file_name|>gnu.py<|end_file_name|><|fim▁begin|>from __future__ import division, absolute_import, print_function
import re
import os
import sys
import warnings
import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
def is_win32():
return sys.platform == "win32" and platform.architecture()[0] == "32bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
version_string = version_string[version_string.find('\n')+1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
# gfortrans may still return long version strings (``-dumpversion`` was
# an alias for ``--version``)
if len(version_string) <= 20:
# Try to find a valid version string
m = re.search(r'([0-9.]+)', version_string)
if m:
# g77 provides a longer version string that starts with GNU
# Fortran
if version_string.startswith('GNU Fortran'):
return ('g77', m.group(1))
# gfortran only outputs a version string such as #.#.#, so check
# if the match is at the start of the string
elif m.start() == 0:
return ('gfortran', m.group(1))
else:
# Output probably from --version, try harder:
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
# the '0' is for early g77's
return ('g77', v)
else:
# at some point in the 4.x series, the ' 95' was dropped
# from the version string
return ('gfortran', v)
# If still nothing, raise an error to make the problem easy to find.
err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'g77':
return None
return v[1]
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
}
module_dir_switch = None
module_include_switch = None
# Cygwin: f771: warning: -fPIC ignored for target (all code is
# position independent)
if os.name != 'nt' and sys.platform != 'cygwin':
pic_flags = ['-fPIC']
# use -mno-cygwin for g77 when Python is not Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'linker_so', 'linker_exe']:
executables[key].append('-mno-cygwin')
g2c = 'g2c'
suggested_f90_compiler = 'gnu95'
def get_flags_linker_so(self):
opt = self.linker_so[1:]
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
# Makefile used to build Python. We let disutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
# we try to get it first from the Python Makefile and then we
# fall back to setting it to 10.3 to maximize the set of
# versions we can work with. This is a reasonable default
# even when using the official Python dist and those derived
# from it.
import distutils.sysconfig as sc
g = {}
filename = sc.get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
if target == '10.3':
s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3'
warnings.warn(s)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared -Wl,-gc-sections -Wl,-s")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
# ignore the problem, use the -mimpure-text flag. It isn't
# the safest thing, but seems to work. 'man gcc' says:
# ".. Instead of using -mimpure-text, you should compile all
# source code with -fpic or -fPIC."
opt.append('-mimpure-text')
return opt
def get_libgcc_dir(self):
status, output = exec_command(self.compiler_f77 +
['-print-libgcc-file-name'],
use_tee=0)
if not status:
return os.path.dirname(output)
return None
def get_library_dirs(self):
opt = []
if sys.platform[:5] != 'linux':
d = self.get_libgcc_dir()
if d:
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
root = os.path.join(d, *((os.pardir,)*4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
def get_libraries(self):
opt = []
d = self.get_libgcc_dir()
if d is not None:
g2c = self.g2c + '-pic'
f = self.static_lib_format % (g2c, self.static_lib_extension)
if not os.path.isfile(os.path.join(d, f)):
g2c = self.g2c
else:
g2c = self.g2c
if g2c is not None:
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
runtime_lib = msvc_runtime_library()
if runtime_lib:
opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
def get_flags_debug(self):
return ['-g']
def get_flags_opt(self):
v = self.get_version()
if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
elif v and v >= '4.6.0':
if is_win32():
# use -mincoming-stack-boundary=2
# due to the change to 16 byte stack alignment since GCC 4.6
# but 32 bit Windows ABI defines 4 bytes stack alignment
opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2 '
'-mincoming-stack-boundary=2']
else:
opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2']
else:
opt = ['-O2']
return opt
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
from distutils import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
return []
arch_re = re.compile(r"-arch\s+(\w+)")
arch_flags = []
for arch in arch_re.findall(cflags):
arch_flags += ['-arch', arch]
return arch_flags
def get_flags_arch(self):
return []
def runtime_library_dir_option(self, dir):
return '-Wl,-rpath="%s"' % dir
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
compiler_aliases = ('gfortran',)
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
v = self.gnu_version_match(version_string)
if not v or v[0] != 'gfortran':
return None
v = v[1]
if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_f90' : [None, "-Wall", "-g",
"-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
"-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
}
module_dir_switch = '-J'
module_include_switch = '-I'
g2c = 'gfortran'
def _universal_flags(self, cmd):
"""Return a list of -arch flags for every supported architecture."""
if not sys.platform == 'darwin':
return []
arch_flags = []
# get arches the C compiler gets.
c_archs = self._c_arch_flags()
if "i386" in c_archs:
c_archs[c_archs.index("i386")] = "i686"
# check the arches the Fortran compiler supports, and compare with
# arch flags from C compiler
for arch in ["ppc", "i686", "x86_64", "ppc64"]:
if _can_target(cmd, arch) and arch in c_archs:
arch_flags.extend(["-arch", arch])
return arch_flags
def get_flags(self):
flags = GnuFCompiler.get_flags(self)
arch_flags = self._universal_flags(self.compiler_f90)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_flags_linker_so(self):
flags = GnuFCompiler.get_flags_linker_so(self)
arch_flags = self._universal_flags(self.linker_so)
if arch_flags:
flags[:0] = arch_flags
return flags
def get_library_dirs(self):
opt = GnuFCompiler.get_library_dirs(self)
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
path = os.path.join(root, target, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
def get_libraries(self):
opt = GnuFCompiler.get_libraries(self)
if sys.platform == 'darwin':
opt.remove('cc_dynamic')
if sys.platform == 'win32':
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
# XXX: fix this mess, does not work for mingw
if is_win64():
c_compiler = self.c_compiler
if c_compiler and c_compiler.compiler_type == "msvc":
return []
else:
pass
return opt
def get_target(self):
status, output = exec_command(self.compiler_f77 +
['-v'],
use_tee=0)
if not status:<|fim▁hole|> if m:
return m.group(1)
return ""
def get_flags_opt(self):
return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
d = os.path.dirname(filename)
output = os.path.splitext(filename)[0] + ".o"
try:
newcmd.extend(["-arch", arch, "-c", filename])
p = Popen(newcmd, stderr=STDOUT, stdout=PIPE, cwd=d)
p.communicate()
return p.returncode == 0
finally:
if os.path.exists(output):
os.remove(output)
finally:
os.remove(filename)
return False
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
try:
compiler = GnuFCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)
try:
compiler = Gnu95FCompiler()
compiler.customize()
print(compiler.get_version())
except Exception:
msg = get_exception()
print(msg)<|fim▁end|> | m = TARGET_R.search(output) |
<|file_name|>treenode.spec.js<|end_file_name|><|fim▁begin|>var TreeNode = require('../dist/treenode').TreeNode;
describe("TreeNode Class", function() {
var tree;
var data = [
{
name: 'I am ROOT',
status: 'initial'
},
{
name: 'Node 1',
status: 'initial'
},
{
name: 'Node 2',
status: 'initial'
},
{
name: 'Node 3',
status: 'initial'
}
];
beforeEach(function() {
tree = new TreeNode(data[0]);
});
it("should allow a child to be added and return as a TreeNode object", function() {
var leaf = tree.addChild(data[1]);
expect(leaf.data.name).toEqual(data[1].name);
});
it("should return its root", function() {
expect(tree.root().data.name).toEqual(data[0].name);
var leaf = tree.addChild(data[1]);
expect(leaf.root().data.name).toEqual(data[0].name)
});
it("should find data", function() {
tree.addChild(data[1]);
tree.addChild(data[2]);
expect(tree.find(data[1]).data).toEqual(data[1]);<|fim▁hole|> expect(tree.find(data[3])).toBe(null);
});
it("should find leaves", function() {
tree.addChild(data[1]);
var intermediateNode = tree.addChild(data[2]);
intermediateNode.addChild(data[3]);
var leaves = tree.leaves();
// we've added 3 nodes, but only two are leaves
expect(leaves.length).toBe(2);
});
it("should execute forEach() callback on all child nodes", function() {
var intermediateNode = tree.addChild(data[1]);
var childNode = intermediateNode.addChild(data[2]);
var grandchildNode = childNode.addChild(data[3]);
intermediateNode.forEach(function(node) {
node.data.status = 'updated';
});
expect(tree.root().data.status).toBe('initial');
expect(intermediateNode.data.status).toBe('updated');
expect(childNode.data.status).toBe('updated');
expect(grandchildNode.data.status).toBe('updated');
});
it("should return the number of children", function() {
expect(tree.numChildren()).toBe(0);
tree.addChild(data[1]);
expect(tree.numChildren()).toBe(1);
var intermediateNode = tree.addChild(data[2]);
expect(tree.numChildren()).toBe(2);
intermediateNode.addChild(data[3]);
expect(tree.numChildren()).toBe(2);
expect(intermediateNode.numChildren()).toBe(1);
});
});<|fim▁end|> | expect(tree.find(data[2]).data).toEqual(data[2]); |
<|file_name|>hero.service.ts<|end_file_name|><|fim▁begin|>import {Injectable} from '@angular/core';
//import {HEROES} from './mock-heroes';
import {Http,Headers} from '@angular/http';
import 'rxjs/add/operator/toPromise';
import {Hero} from './model/Hero';
@Injectable()
export class HeroService {
private hero:Hero;
private heroesApiUrl = 'api/heroes'; //URL to web api
private headers = new Headers({'Content-Type': 'application/json'});
constructor(private http:Http){
}
getHeroes():Promise<Hero[]>{
//return Promise.resolve(HEROES);
return this.http.get(this.heroesApiUrl)
.toPromise()
.then(response => response.json().data as Hero[])
.catch(this.handleError);
}
getHeroesSlowly() {
//return new Promise<Hero[]>(resolve =>
//setTimeout(() => resolve(HEROES), 4000)); // 4 seconds
}
getHero(heroId: number | string) {
return this.getHeroes()
.then(heroes => heroes.find(hero => hero.id === heroId));
}
update(hero: Hero): Promise<Hero> {
const url = `${this.heroesApiUrl}/${hero.id}`;<|fim▁hole|> .toPromise()
.then(() => hero)
.catch(this.handleError);
}
private handleError(error: any): Promise<any> {
console.error('An error occurred', error); // for demo purposes only
return Promise.reject(error.message || error);
}
create(name:string):Promise<Hero>{
return this.http
.post(this.heroesApiUrl, JSON.stringify({name: name}), {headers: this.headers})
.toPromise()
.then(res => res.json().data)
.catch(this.handleError);
}
}<|fim▁end|> | return this.http
.put(url, JSON.stringify(hero), {headers: this.headers}) |
<|file_name|>input.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from nodes import Node
class Input(Node):
char = "z"
args = 0
results = 1<|fim▁hole|> return input() or Input.contents<|fim▁end|> | contents = ""
def func(self):
"""input() or Input.contents""" |
<|file_name|>confParser.py<|end_file_name|><|fim▁begin|>import string
from pyparsing import (
Literal, White, Word, alphanums, CharsNotIn, Forward, Group, SkipTo,
Optional, OneOrMore, ZeroOrMore, pythonStyleComment)
class Parser(object):
left_bracket = Literal("{").suppress()
right_bracket = Literal("}").suppress()
semicolon = Literal(";").suppress()
space = White().suppress()
key = Word(alphanums + "_/")
value = CharsNotIn("{};")
value2 = CharsNotIn(";")
location = CharsNotIn("{};," + string.whitespace)
ifword = Literal("if")
setword = Literal("set")
modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
assignment = (key + Optional(space + value) + semicolon)
setblock = (setword + OneOrMore(space + value2) + semicolon)
block = Forward()
ifblock = Forward()
subblock = Forward()
ifblock << (
ifword
+ SkipTo('{')
+ left_bracket
+ subblock
+ right_bracket)
subblock << ZeroOrMore(
Group(assignment) | block | ifblock | setblock
)
block << Group(
Group(key + Optional(space + modifier) + Optional(space + location))
+ left_bracket
+ Group(subblock)
+ right_bracket
)
script = OneOrMore(Group(assignment) | block).ignore(pythonStyleComment)
def __init__(self, source):
self.source = source
def parse(self):
return self.script.parseString(self.source)
def as_list(self):
return self.parse().asList()
class Dumper(object):
def __init__(self, blocks, indentation=4):
self.blocks = blocks
self.indentation = indentation
def __iter__(self, blocks=None, current_indent=0, spacer=' '):<|fim▁hole|> blocks = blocks or self.blocks
for key, values in blocks:
if current_indent:
yield spacer
indentation = spacer * current_indent
if isinstance(key, list):
yield indentation + spacer.join(key) + ' {'
for parameter in values:
if isinstance(parameter[0], list):
dumped = self.__iter__(
[parameter],
current_indent + self.indentation)
for line in dumped:
yield line
else:
dumped = spacer.join(parameter) + ';'
yield spacer * (
current_indent + self.indentation) + dumped
yield indentation + '}'
else:
yield spacer * current_indent + key + spacer + values + ';'
def as_string(self):
return '\n'.join(self)
def to_file(self, out):
for line in self:
out.write(line+"\n")
out.close()
return out
def loads(source):
return Parser(source).as_list()
def load(_file):
return loads(_file.read())
def dumps(blocks, indentation=4):
return Dumper(blocks, indentation).as_string()
def dump(blocks, _file, indentation=4):
return Dumper(blocks, indentation).to_file(_file)<|fim▁end|> | |
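# Illustrative usage sketch (not part of the original module): round-trip an
# nginx-style config through the Parser/Dumper defined above.
if __name__ == '__main__':
    example = 'server {\n listen 80;\n location / {\n root /var/www;\n }\n}\n'
    parsed = loads(example)   # nested [key, values] lists
    print(dumps(parsed))      # serialised back to config text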
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
/// A random number generator which shares one instance of an `OsRng`.
///
/// A problem with `OsRng`, which is inherited by `StdRng` and so
/// `ThreadRng`, is that it reads from `/dev/random`, and so consumes
/// a file descriptor. For multi-threaded applications like Servo,
/// it is easy to exhaust the supply of file descriptors this way.
///
/// This crate fixes that, by only using one `OsRng`, which is just
/// used to seed and re-seed a `ServoRng`.
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(target_pointer_width = "64")]
use rand::isaac::Isaac64Rng as IsaacWordRng;
#[cfg(target_pointer_width = "32")]
use rand::isaac::IsaacRng as IsaacWordRng;
use rand::os::OsRng;
use rand::reseeding::{Reseeder, ReseedingRng};
pub use rand::{Rand, Rng, SeedableRng};
use std::cell::RefCell;
use std::mem;
use std::rc::Rc;
use std::sync::Mutex;
use std::u64;
use uuid::Uuid;
// Slightly annoying having to cast between sizes.
#[cfg(target_pointer_width = "64")]
fn as_isaac_seed(seed: &[usize]) -> &[u64] {
unsafe { mem::transmute(seed) }
}
#[cfg(target_pointer_width = "32")]
fn as_isaac_seed(seed: &[usize]) -> &[u32] {
unsafe { mem::transmute(seed) }
}
// The shared RNG which may hold on to a file descriptor
lazy_static! {
static ref OS_RNG: Mutex<OsRng> = match OsRng::new() {
Ok(r) => Mutex::new(r),
Err(e) => panic!("Failed to seed OsRng: {}", e),
};
}
// Generate 32K of data between reseedings
const RESEED_THRESHOLD: u64 = 32_768;
// An in-memory RNG that only uses the shared file descriptor for seeding and reseeding.
pub struct ServoRng {
rng: ReseedingRng<IsaacWordRng, ServoReseeder>,
}
impl Rng for ServoRng {
#[inline]<|fim▁hole|> #[inline]
fn next_u64(&mut self) -> u64 {
self.rng.next_u64()
}
}
impl<'a> SeedableRng<&'a [usize]> for ServoRng {
/// Create a manually-reseeding instance of `ServoRng`.
///
/// Note that this RNG does not reseed itself, so care is needed to reseed the RNG
/// if it is required to be cryptographically sound.
fn from_seed(seed: &[usize]) -> ServoRng {
trace!("Creating new manually-reseeded ServoRng.");
let isaac_rng = IsaacWordRng::from_seed(as_isaac_seed(seed));
let reseeding_rng = ReseedingRng::new(isaac_rng, u64::MAX, ServoReseeder);
ServoRng { rng: reseeding_rng }
}
/// Reseed the RNG.
fn reseed(&mut self, seed: &'a [usize]) {
trace!("Manually reseeding ServoRng.");
self.rng.reseed((ServoReseeder, as_isaac_seed(seed)))
}
}
impl ServoRng {
/// Create an auto-reseeding instance of `ServoRng`.
///
/// This uses the shared `OsRng`, so avoids consuming
/// a file descriptor.
pub fn new() -> ServoRng {
trace!("Creating new ServoRng.");
let mut os_rng = OS_RNG.lock().expect("Poisoned lock.");
let isaac_rng = IsaacWordRng::rand(&mut *os_rng);
let reseeding_rng = ReseedingRng::new(isaac_rng, RESEED_THRESHOLD, ServoReseeder);
ServoRng { rng: reseeding_rng }
}
}
// The reseeder for the in-memory RNG.
struct ServoReseeder;
impl Reseeder<IsaacWordRng> for ServoReseeder {
fn reseed(&mut self, rng: &mut IsaacWordRng) {
trace!("Reseeding ServoRng.");
let mut os_rng = OS_RNG.lock().expect("Poisoned lock.");
*rng = IsaacWordRng::rand(&mut *os_rng);
}
}
impl Default for ServoReseeder {
fn default() -> ServoReseeder {
ServoReseeder
}
}
// A thread-local RNG, designed as a drop-in replacement for rand::ThreadRng.
#[derive(Clone)]
pub struct ServoThreadRng {
rng: Rc<RefCell<ServoRng>>,
}
// A thread-local RNG, designed as a drop-in replacement for rand::thread_rng.
pub fn thread_rng() -> ServoThreadRng {
SERVO_THREAD_RNG.with(|t| t.clone())
}
thread_local! {
static SERVO_THREAD_RNG: ServoThreadRng = ServoThreadRng { rng: Rc::new(RefCell::new(ServoRng::new())) };
}
impl Rng for ServoThreadRng {
fn next_u32(&mut self) -> u32 {
self.rng.borrow_mut().next_u32()
}
fn next_u64(&mut self) -> u64 {
self.rng.borrow_mut().next_u64()
}
#[inline]
fn fill_bytes(&mut self, bytes: &mut [u8]) {
self.rng.borrow_mut().fill_bytes(bytes)
}
}
// Generates a random value using the thread-local random number generator.
// A drop-in replacement for rand::random.
#[inline]
pub fn random<T: Rand>() -> T {
thread_rng().gen()
}
// TODO(eijebong): Replace calls to this by random once `uuid::Uuid` implements `rand::Rand` again.
#[inline]
pub fn random_uuid() -> Uuid {
let mut bytes = [0; 16];
thread_rng().fill_bytes(&mut bytes);
Uuid::from_random_bytes(bytes)
}<|fim▁end|> | fn next_u32(&mut self) -> u32 {
self.rng.next_u32()
}
|
<|file_name|>test_propagation.py<|end_file_name|><|fim▁begin|>import numpy as np
from dipy.data import default_sphere
from dipy.tracking.propspeed import ndarray_offset, eudx_both_directions
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite)
def stepped_1d(arr_1d):<|fim▁hole|>def test_offset():
# Test ndarray_offset function
for dt in (np.int32, np.float64):
index = np.array([1, 1], dtype=np.intp)
A = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 3]], dtype=dt)
strides = np.array(A.strides, np.intp)
i_size = A.dtype.itemsize
assert_equal(ndarray_offset(index, strides, 2, i_size), 4)
assert_equal(A.ravel()[4], A[1, 1])
# Index and strides arrays must be C-continuous. Test this is enforced
# by using non-contiguous versions of the input arrays.
assert_raises(ValueError, ndarray_offset,
stepped_1d(index), strides, 2, i_size)
assert_raises(ValueError, ndarray_offset,
index, stepped_1d(strides), 2, i_size)
def test_eudx_both_directions_errors():
# Test error conditions for both directions function
sphere = default_sphere
seed = np.zeros(3, np.float64)
qa = np.zeros((4, 5, 6, 7), np.float64)
ind = qa.copy()
# All of seed, qa, ind, odf_vertices must be C-contiguous. Check by
# passing in versions that aren't C contiguous
assert_raises(ValueError, eudx_both_directions,
stepped_1d(seed), 0, qa, ind, sphere.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
seed, 0, qa[..., ::2], ind, sphere.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
seed, 0, qa, ind[..., ::2], sphere.vertices, 0.5, 0.1,
1., 1., 2)
assert_raises(ValueError, eudx_both_directions,
seed, 0, qa, ind, sphere.vertices[::2], 0.5, 0.1,
1., 1., 2)
if __name__ == '__main__':
run_module_suite()<|fim▁end|> | # Make a version of `arr_1d` which is not contiguous
return np.vstack((arr_1d, arr_1d)).ravel(order='F')[::2]
|
<|file_name|>fmt_tbl.rs<|end_file_name|><|fim▁begin|>// SPDX-License-Identifier: MIT
// Copyright [email protected]
// Copyright iced contributors
use super::super::super::data_reader::DataReader;
use super::super::super::iced_constants::IcedConstants;
use super::super::pseudo_ops::get_pseudo_ops;
use super::super::strings_tbl::get_strings_table;
use super::enums::*;
use super::fmt_data::FORMATTER_TBL_DATA;
use super::info::*;
use alloc::boxed::Box;
use alloc::string::String;
use alloc::vec::Vec;
use core::{mem, u32};
lazy_static! {
pub(super) static ref ALL_INFOS: Vec<Box<dyn InstrInfo + Sync + Send>> = read();
}
fn add_suffix(s: &str, c: char) -> String {
if c == '\0' {
String::from(s)
} else {
let mut res = String::with_capacity(s.len() + 1);
res.push_str(s);
res.push(c);
res
}
}
fn read() -> Vec<Box<dyn InstrInfo + Sync + Send>> {
let mut infos: Vec<Box<dyn InstrInfo + Sync + Send>> = Vec::with_capacity(IcedConstants::CODE_ENUM_COUNT);
let mut reader = DataReader::new(FORMATTER_TBL_DATA);
let strings = get_strings_table();
let mut prev_index = -1isize;
for i in 0..IcedConstants::CODE_ENUM_COUNT {
let f = reader.read_u8();
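// Low 7 bits of `f` select the CtorKind; the high bit flags that the mnemonic read
// below should get a 'v' prefix.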
let mut ctor_kind: CtorKind = unsafe { mem::transmute((f & 0x7F) as u8) };
let current_index;
if ctor_kind == CtorKind::Previous {
current_index = reader.index() as isize;
reader.set_index(prev_index as usize);
ctor_kind = unsafe { mem::transmute((reader.read_u8() & 0x7F) as u8) };
} else {
current_index = -1;
prev_index = reader.index() as isize - 1;
}
let mut s = if (f & 0x80) != 0 {
let s = &strings[reader.read_compressed_u32() as usize];
let mut res = String::with_capacity(s.len() + 1);
res.push('v');
res.push_str(s);
res
} else {
strings[reader.read_compressed_u32() as usize].clone()
};
let c;
let v;
let v2;
let v3;
let info: Box<dyn InstrInfo + Sync + Send> = match ctor_kind {
CtorKind::Previous => unreachable!(),
CtorKind::Normal_1 => Box::new(SimpleInstrInfo::with_mnemonic(s)),
CtorKind::Normal_2a => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
Box::new(SimpleInstrInfo::with_mnemonic_suffix(s, s2))
}
CtorKind::Normal_2b => {
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo::with_mnemonic_flags(s, v))
}
CtorKind::Normal_2c => {
c = reader.read_u8() as u8 as char;
s = add_suffix(&s, c);
Box::new(SimpleInstrInfo::with_mnemonic(s))
}
CtorKind::Normal_3 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo::new(s, s2, v))
}
CtorKind::AamAad => Box::new(SimpleInstrInfo_AamAad::new(s)),
CtorKind::asz => {
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_as::new(v, s))
}
CtorKind::bnd => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_bnd::new(s, s2, v))
}
CtorKind::DeclareData => Box::new(SimpleInstrInfo_DeclareData::new(unsafe { mem::transmute(i as u16) }, s)),
CtorKind::er_2 => {
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_er::with_mnemonic(v, s))
}
CtorKind::er_4 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();<|fim▁hole|>
CtorKind::far => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_far::new(v, s, s2))
}
CtorKind::imul => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
Box::new(SimpleInstrInfo_imul::new(s, s2))
}
CtorKind::maskmovq => Box::new(SimpleInstrInfo_maskmovq::new(s)),
CtorKind::movabs => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
let s3 = strings[reader.read_compressed_u32() as usize].clone();
let s4 = add_suffix(&s3, c);
Box::new(SimpleInstrInfo_movabs::new(s, s2, s3, s4))
}
CtorKind::nop => {
v = reader.read_compressed_u32();
v2 = reader.read_u8() as u32;
Box::new(SimpleInstrInfo_nop::new(v, s, unsafe { mem::transmute(v2 as u8) }))
}
CtorKind::OpSize => {
v = reader.read_u8() as u32;
let s2 = add_suffix(&s, 'w');
let s3 = add_suffix(&s, 'l');
let s4 = add_suffix(&s, 'q');
Box::new(SimpleInstrInfo_OpSize::new(unsafe { mem::transmute(v as u8) }, s, s2, s3, s4))
}
CtorKind::OpSize2_bnd => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
let s3 = strings[reader.read_compressed_u32() as usize].clone();
let s4 = strings[reader.read_compressed_u32() as usize].clone();
Box::new(SimpleInstrInfo_OpSize2_bnd::new(s, s2, s3, s4))
}
CtorKind::OpSize3 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_OpSize3::new(v, s, s2))
}
CtorKind::os => {
v = reader.read_compressed_u32();
v2 = reader.read_u8() as u32;
debug_assert!(v2 <= 1);
v3 = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os::new(v, s, v2 != 0, v3))
}
CtorKind::CC_1 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_cc::new(v, vec![s], vec![s2]))
}
CtorKind::CC_2 => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
c = reader.read_u8() as u8 as char;
let s3 = add_suffix(&s, c);
let s4 = add_suffix(&s2, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_cc::new(v, vec![s, s2], vec![s3, s4]))
}
CtorKind::CC_3 => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
let s3 = strings[reader.read_compressed_u32() as usize].clone();
c = reader.read_u8() as u8 as char;
let s4 = add_suffix(&s, c);
let s5 = add_suffix(&s2, c);
let s6 = add_suffix(&s3, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_cc::new(v, vec![s, s2, s3], vec![s4, s5, s6]))
}
CtorKind::os_jcc_1 => {
v2 = reader.read_compressed_u32();
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_jcc::new(v, v2, vec![s]))
}
CtorKind::os_jcc_2 => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
v2 = reader.read_compressed_u32();
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_jcc::new(v, v2, vec![s, s2]))
}
CtorKind::os_jcc_3 => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
let s3 = strings[reader.read_compressed_u32() as usize].clone();
v2 = reader.read_compressed_u32();
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_jcc::new(v, v2, vec![s, s2, s3]))
}
CtorKind::os_loopcc => {
let s2 = strings[reader.read_compressed_u32() as usize].clone();
c = reader.read_u8() as u8 as char;
let s3 = add_suffix(&s, c);
let s4 = add_suffix(&s2, c);
v3 = reader.read_compressed_u32();
v = reader.read_compressed_u32();
v2 = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_loop::new(v, v2, v3, vec![s, s2], vec![s3, s4]))
}
CtorKind::os_loop => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
v2 = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_loop::new(v, v2, u32::MAX, vec![s], vec![s2]))
}
CtorKind::os_mem => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_mem::new(v, s, s2))
}
CtorKind::Reg16 => {
let s2 = add_suffix(&s, 'w');
Box::new(SimpleInstrInfo_Reg16::new(s, s2))
}
CtorKind::os_mem2 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os_mem2::new(v, s, s2))
}
CtorKind::os2_3 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
v2 = reader.read_u8() as u32;
debug_assert!(v2 <= 1);
Box::new(SimpleInstrInfo_os2::new(v, s, s2, v2 != 0, 0))
}
CtorKind::os2_4 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
v = reader.read_compressed_u32();
v2 = reader.read_u8() as u32;
debug_assert!(v2 <= 1);
v3 = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_os2::new(v, s, s2, v2 != 0, v3))
}
CtorKind::pblendvb => Box::new(SimpleInstrInfo_pblendvb::new(s)),
CtorKind::pclmulqdq => {
v = reader.read_u8() as u32;
Box::new(SimpleInstrInfo_pclmulqdq::new(s, get_pseudo_ops(unsafe { mem::transmute(v as u8) })))
}
CtorKind::pops => {
v = reader.read_u8() as u32;
v2 = reader.read_u8() as u32;
debug_assert!(v2 <= 1);
Box::new(SimpleInstrInfo_pops::new(s, get_pseudo_ops(unsafe { mem::transmute(v as u8) }), v2 != 0))
}
CtorKind::mem16 => {
c = reader.read_u8() as u8 as char;
let s2 = add_suffix(&s, c);
let s3 = add_suffix(&s, 'w');
Box::new(SimpleInstrInfo_mem16::new(s, s2, s3))
}
CtorKind::Reg32 => Box::new(SimpleInstrInfo_Reg32::new(s)),
CtorKind::sae => {
v = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_sae::new(v, s))
}
CtorKind::ST_STi => Box::new(SimpleInstrInfo_ST_STi::new(s)),
CtorKind::STi_ST => {
v = reader.read_u8() as u32;
debug_assert!(v <= 1);
Box::new(SimpleInstrInfo_STi_ST::new(s, v != 0))
}
CtorKind::STIG1 => {
v = reader.read_u8() as u32;
debug_assert!(v <= 1);
Box::new(SimpleInstrInfo_STIG1::new(s, v != 0))
}
};
infos.push(info);
if current_index >= 0 {
reader.set_index(current_index as usize);
}
}
debug_assert!(!reader.can_read());
infos
}<|fim▁end|> | v2 = reader.read_compressed_u32();
Box::new(SimpleInstrInfo_er::new(v, s, s2, v2))
} |
<|file_name|>issue-23968-const-not-overflow.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>const U16_MAX_HALF: u16 = !0u16 / 2;
const U32_MAX_HALF: u32 = !0u32 / 2;
const U64_MAX_HALF: u64 = !0u64 / 2;
fn main() {
assert_eq!(U8_MAX_HALF, 0x7f);
assert_eq!(U16_MAX_HALF, 0x7fff);
assert_eq!(U32_MAX_HALF, 0x7fff_ffff);
assert_eq!(U64_MAX_HALF, 0x7fff_ffff_ffff_ffff);
}<|fim▁end|> | // run-pass
const U8_MAX_HALF: u8 = !0u8 / 2; |
<|file_name|>useBlockingPaginationFragment-with-suspense-transition-test.js<|end_file_name|><|fim▁begin|>/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @emails oncall+relay
* @flow
* @format
*/
// flowlint ambiguous-object-type:error
'use strict';
const React = require('react');
const Scheduler = require('scheduler');
import type {Direction} from '../useLoadMoreFunction';
import type {OperationDescriptor, Variables} from 'relay-runtime';
const {useEffect, useTransition, useMemo, useState} = React;
const TestRenderer = require('react-test-renderer');
const invariant = require('invariant');
const useBlockingPaginationFragmentOriginal = require('../useBlockingPaginationFragment');
const ReactRelayContext = require('react-relay/ReactRelayContext');
const {
ConnectionHandler,
FRAGMENT_OWNER_KEY,
FRAGMENTS_KEY,
ID_KEY,
createOperationDescriptor,
graphql,
getRequest,
getFragment,
} = require('relay-runtime');
const {createMockEnvironment} = require('relay-test-utils');
// TODO: We're switching the tuple order of useTransition so for ~1 day we
// need to disable this test so we can flip in www then fbsource.
const TEMPORARY_SKIP_WHILE_REFACTORING_USE_TRANSITION = true;
describe('useBlockingPaginationFragment with useTransition', () => {
if (
TEMPORARY_SKIP_WHILE_REFACTORING_USE_TRANSITION ||
typeof React.useTransition !== 'function'
) {
it('empty test to prevent Jest from failing', () => {
// This suite is only useful with experimental React build
});
} else {
let environment;
let initialUser;
let gqlQuery;
let gqlQueryWithoutID;
let gqlPaginationQuery;
let gqlFragment;
let query;
let queryWithoutID;
let paginationQuery;
let variables;
let variablesWithoutID;
let setOwner;
let renderFragment;
let loadNext;
let refetch;
let forceUpdate;
let release;
let Renderer;
class ErrorBoundary extends React.Component<any, any> {
state = {error: null};
componentDidCatch(error) {
this.setState({error});
}
render() {
const {children, fallback} = this.props;
const {error} = this.state;
if (error) {
return React.createElement(fallback, {error});
}
return children;
}
}
function useBlockingPaginationFragmentWithSuspenseTransition(
fragmentNode,
fragmentRef,
) {
const [isPendingNext, startTransition] = useTransition();
// $FlowFixMe[incompatible-call]
const {data, ...result} = useBlockingPaginationFragmentOriginal(
fragmentNode,
// $FlowFixMe[prop-missing]
// $FlowFixMe[incompatible-call]
fragmentRef,
);
loadNext = (...args) => {
let disposable = {dispose: () => {}};
startTransition(() => {
disposable = result.loadNext(...args);
});
return disposable;
};
refetch = result.refetch;
// $FlowFixMe[prop-missing]
result.isPendingNext = isPendingNext;
useEffect(() => {
Scheduler.unstable_yieldValue({data, ...result});
});
return {data, ...result};
}
function assertYieldsWereCleared() {
const actualYields = Scheduler.unstable_clearYields();
if (actualYields.length !== 0) {
throw new Error(
'Log of yielded values is not empty. ' +
'Call expect(Scheduler).toHaveYielded(...) first.',
);
}
}
function assertYield(expected, actual) {
expect(actual.data).toEqual(expected.data);
expect(actual.isPendingNext).toEqual(expected.isPendingNext);
expect(actual.hasNext).toEqual(expected.hasNext);
expect(actual.hasPrevious).toEqual(expected.hasPrevious);
}
function expectFragmentResults(
expectedYields: $ReadOnlyArray<{|
data: $FlowFixMe,
isPendingNext: boolean,
hasNext: boolean,
hasPrevious: boolean,
|}>,
) {
assertYieldsWereCleared();
Scheduler.unstable_flushNumberOfYields(expectedYields.length);
const actualYields = Scheduler.unstable_clearYields();
expect(actualYields.length).toEqual(expectedYields.length);
expectedYields.forEach((expected, idx) =>
assertYield(expected, actualYields[idx]),
);
}
function expectRequestIsInFlight(expected) {
expect(environment.execute).toBeCalledTimes(expected.requestCount);
expect(
environment.mock.isLoading(
gqlPaginationQuery,
expected.paginationVariables,
{force: true},
),
).toEqual(expected.inFlight);
}
function expectFragmentIsPendingOnPagination(
renderer,
direction: Direction,
expected: {|
data: mixed,
hasNext: boolean,
hasPrevious: boolean,
paginationVariables: Variables,
|},
) {
// Assert fragment sets isPending to true
expectFragmentResults([
{
data: expected.data,
isPendingNext: direction === 'forward',
hasNext: expected.hasNext,
hasPrevious: expected.hasPrevious,
},
]);
// Assert pagination query was fetched
expectRequestIsInFlight({...expected, inFlight: true, requestCount: 1});
}
function createFragmentRef(id, owner) {
return {
[ID_KEY]: id,
[FRAGMENTS_KEY]: {
useBlockingPaginationFragmentWithSuspenseTransitionTestNestedUserFragment: {},
},
[FRAGMENT_OWNER_KEY]: owner.request,
};
}
beforeEach(() => {
// Set up mocks
jest.resetModules();
jest.mock('warning');
jest.mock('scheduler', () => {
return jest.requireActual('scheduler/unstable_mock');
});
// Suppress `act` warnings since we are intentionally not
// using it for most tests here. `act` currently always
// flushes Suspense fallbacks, and that's not what we want
// when asserting pending/suspended states.
const originalLogError = console.error.bind(console);
jest.spyOn(console, 'error').mockImplementation((message, ...args) => {
if (typeof message === 'string' && message.includes('act(...)')) {
return;
}
originalLogError(message, ...args);
});
// Set up environment and base data
environment = createMockEnvironment({
handlerProvider: () => ConnectionHandler,
});
release = jest.fn();
environment.retain.mockImplementation((...args) => {
return {
dispose: release,
};
});
graphql`
fragment useBlockingPaginationFragmentWithSuspenseTransitionTestNestedUserFragment on User {
username
}
`;
gqlFragment = getFragment(graphql`
fragment useBlockingPaginationFragmentWithSuspenseTransitionTestUserFragment on User
@refetchable(
queryName: "useBlockingPaginationFragmentWithSuspenseTransitionTestUserFragmentPaginationQuery"
)
@argumentDefinitions(
isViewerFriendLocal: {type: "Boolean", defaultValue: false}
orderby: {type: "[String]"}
) {
id
name
friends(
after: $after
first: $first
before: $before
last: $last
orderby: $orderby
isViewerFriend: $isViewerFriendLocal
) @connection(key: "UserFragment_friends") {
edges {
node {
id
name
...useBlockingPaginationFragmentWithSuspenseTransitionTestNestedUserFragment
}
}
}
}
`);
gqlQuery = getRequest(
graphql`
query useBlockingPaginationFragmentWithSuspenseTransitionTestUserQuery(
$id: ID!
$after: ID
$first: Int
$before: ID
$last: Int
$orderby: [String]
$isViewerFriend: Boolean
) {
node(id: $id) {
actor {
...useBlockingPaginationFragmentWithSuspenseTransitionTestUserFragment
@arguments(
isViewerFriendLocal: $isViewerFriend
orderby: $orderby
)
}
}
}
`,
);
gqlQueryWithoutID = getRequest(graphql`
query useBlockingPaginationFragmentWithSuspenseTransitionTestUserQueryWithoutIDQuery(
$after: ID
$first: Int
$before: ID
$last: Int
$orderby: [String]
$isViewerFriend: Boolean
) {
viewer {
actor {
...useBlockingPaginationFragmentWithSuspenseTransitionTestUserFragment
@arguments(
isViewerFriendLocal: $isViewerFriend
orderby: $orderby
)
}
}
}
`);
variablesWithoutID = {
after: null,
first: 1,
before: null,
last: null,
isViewerFriend: false,
orderby: ['name'],
};
variables = {
...variablesWithoutID,
id: '<feedbackid>',
};
gqlPaginationQuery = require('./__generated__/useBlockingPaginationFragmentWithSuspenseTransitionTestUserFragmentPaginationQuery.graphql');
query = createOperationDescriptor(gqlQuery, variables);
queryWithoutID = createOperationDescriptor(
gqlQueryWithoutID,
variablesWithoutID,
);
paginationQuery = createOperationDescriptor(
gqlPaginationQuery,
variables,
);
environment.commitPayload(query, {
node: {
__typename: 'Feedback',
id: '<feedbackid>',
actor: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
username: 'username:node:1',
},
},
],
pageInfo: {
endCursor: 'cursor:1',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
},
},
});
environment.commitPayload(queryWithoutID, {
viewer: {
actor: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
username: 'username:node:1',
},
},
],
pageInfo: {
endCursor: 'cursor:1',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
},
},
});
// Set up renderers
Renderer = props => null;
const Container = (props: {
userRef?: {...},
owner: $FlowFixMe,
fragment: $FlowFixMe,
...
}) => {
// We need to render a component to run a Hook
const [owner, _setOwner] = useState(props.owner);
const [_, _setCount] = useState(0);
const fragment = props.fragment ?? gqlFragment;
const artificialUserRef = useMemo(() => {
const snapshot = environment.lookup(owner.fragment);
return (snapshot.data: $FlowFixMe)?.node?.actor;
}, [owner]);
const userRef = props.hasOwnProperty('userRef')
? props.userRef
: artificialUserRef;
setOwner = _setOwner;
forceUpdate = _setCount;
const {
data: userData,
} = useBlockingPaginationFragmentWithSuspenseTransition(
fragment,
// $FlowFixMe[prop-missing]
userRef,
);
return <Renderer user={userData} />;
};
const ContextProvider = ({children}) => {
// TODO(T39494051) - We set empty variables in relay context to make
// Flow happy, but useBlockingPaginationFragment does not use them, instead it uses
// the variables from the fragment owner.
const relayContext = useMemo(() => ({environment}), []);
return (
<ReactRelayContext.Provider value={relayContext}>
{children}
</ReactRelayContext.Provider>
);
};
const Fallback = () => {
useEffect(() => {
Scheduler.unstable_yieldValue('Fallback');
});
return 'Fallback';
};
renderFragment = (args?: {
isConcurrent?: boolean,
owner?: $FlowFixMe,
userRef?: $FlowFixMe,
fragment?: $FlowFixMe,
...
}): $FlowFixMe => {
const {isConcurrent = true, ...props} = args ?? {};
return TestRenderer.create(
<ErrorBoundary fallback={({error}) => `Error: ${error.message}`}>
<React.Suspense fallback={<Fallback />}>
<ContextProvider>
<Container owner={query} {...props} />
</ContextProvider>
</React.Suspense>
</ErrorBoundary>,
// $FlowFixMe[prop-missing] - error revealed when flow-typing ReactTestRenderer
{unstable_isConcurrent: isConcurrent},
);
};
initialUser = {
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', query),
},
},
],
pageInfo: {
endCursor: 'cursor:1',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
};
});
afterEach(() => {
environment.mockClear();
jest.dontMock('scheduler');
});
describe('loadNext', () => {
const direction = 'forward';
// Sanity check test, should already be tested in useBlockingPagination test
it('loads and renders next items in connection', () => {
const callback = jest.fn();
const renderer = renderFragment();
expectFragmentResults([
{
data: initialUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
loadNext(1, {onComplete: callback});
const paginationVariables = {
id: '1',
after: 'cursor:1',
first: 1,
before: null,<|fim▁hole|> };
expectFragmentIsPendingOnPagination(renderer, direction, {
data: initialUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
expect(renderer.toJSON()).toEqual(null);
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
username: 'username:node:2',
},
},
],
pageInfo: {
startCursor: 'cursor:2',
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: true,
},
},
},
},
});
const expectedUser = {
...initialUser,
friends: {
...initialUser.friends,
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', query),
},
},
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
...createFragmentRef('node:2', query),
},
},
],
pageInfo: {
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
};
expectFragmentResults([
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
expect(callback).toBeCalledTimes(1);
});
it('renders pending flag correctly if pagination update is interrupted before it commits (unsuspends)', () => {
const callback = jest.fn();
const renderer = renderFragment();
expectFragmentResults([
{
data: initialUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
loadNext(1, {onComplete: callback});
const paginationVariables = {
id: '1',
after: 'cursor:1',
first: 1,
before: null,
last: null,
isViewerFriendLocal: false,
orderby: ['name'],
};
expectFragmentIsPendingOnPagination(renderer, direction, {
data: initialUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
expect(renderer.toJSON()).toEqual(null);
// Schedule a high-pri update while the component is
// suspended on pagination
Scheduler.unstable_runWithPriority(
Scheduler.unstable_UserBlockingPriority,
() => {
forceUpdate(prev => prev + 1);
},
);
// Assert high-pri update is rendered when initial update
// that suspended hasn't committed
// Assert that the avoided Suspense fallback isn't rendered
expect(renderer.toJSON()).toEqual(null);
expectFragmentResults([
{
data: initialUser,
// Assert that isPending flag is still true
isPendingNext: true,
hasNext: true,
hasPrevious: false,
},
]);
// Assert list is updated after pagination request completes
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
username: 'username:node:2',
},
},
],
pageInfo: {
startCursor: 'cursor:2',
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: true,
},
},
},
},
});
const expectedUser = {
...initialUser,
friends: {
...initialUser.friends,
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', query),
},
},
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
...createFragmentRef('node:2', query),
},
},
],
pageInfo: {
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
};
expectFragmentResults([
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
expect(callback).toBeCalledTimes(1);
});
it('loads more correctly when original variables do not include an id', () => {
const callback = jest.fn();
const viewer = environment.lookup(queryWithoutID.fragment).data?.viewer;
const userRef =
typeof viewer === 'object' && viewer != null ? viewer?.actor : null;
invariant(userRef != null, 'Expected to have cached test data');
let expectedUser = {
...initialUser,
friends: {
...initialUser.friends,
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', queryWithoutID),
},
},
],
},
};
const renderer = renderFragment({owner: queryWithoutID, userRef});
expectFragmentResults([
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
loadNext(1, {onComplete: callback});
const paginationVariables = {
id: '1',
after: 'cursor:1',
first: 1,
before: null,
last: null,
isViewerFriendLocal: false,
orderby: ['name'],
};
expectFragmentIsPendingOnPagination(renderer, direction, {
data: expectedUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
expect(renderer.toJSON()).toEqual(null);
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
username: 'username:node:2',
},
},
],
pageInfo: {
startCursor: 'cursor:2',
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: true,
},
},
},
},
});
expectedUser = {
...initialUser,
friends: {
...initialUser.friends,
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', queryWithoutID),
},
},
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
...createFragmentRef('node:2', queryWithoutID),
},
},
],
pageInfo: {
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
};
expectFragmentResults([
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
expect(callback).toBeCalledTimes(1);
});
it('calls callback with error when error occurs during fetch', () => {
const callback = jest.fn();
const renderer = renderFragment();
expectFragmentResults([
{
data: initialUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
loadNext(1, {onComplete: callback});
const paginationVariables = {
id: '1',
after: 'cursor:1',
first: 1,
before: null,
last: null,
isViewerFriendLocal: false,
orderby: ['name'],
};
expectFragmentIsPendingOnPagination(renderer, direction, {
data: initialUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
expect(renderer.toJSON()).toEqual(null);
const error = new Error('Oops');
environment.mock.reject(gqlPaginationQuery, error);
// We pass the error in the callback, but do not throw during render
// since we want to continue rendering the existing items in the
// connection
expect(callback).toBeCalledTimes(1);
expect(callback).toBeCalledWith(error);
});
it('preserves pagination request if re-rendered with same fragment ref', () => {
const callback = jest.fn();
const renderer = renderFragment();
expectFragmentResults([
{
data: initialUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
loadNext(1, {onComplete: callback});
const paginationVariables = {
id: '1',
after: 'cursor:1',
first: 1,
before: null,
last: null,
isViewerFriendLocal: false,
orderby: ['name'],
};
expectFragmentIsPendingOnPagination(renderer, direction, {
data: initialUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
expect(renderer.toJSON()).toEqual(null);
setOwner({...query});
// Assert that request is still in flight after re-rendering
// with new fragment ref that points to the same data.
expectRequestIsInFlight({
inFlight: true,
requestCount: 1,
gqlPaginationQuery,
paginationVariables,
});
expect(callback).toBeCalledTimes(0);
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
username: 'username:node:2',
},
},
],
pageInfo: {
startCursor: 'cursor:2',
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: true,
},
},
},
},
});
const expectedUser = {
...initialUser,
friends: {
...initialUser.friends,
edges: [
{
cursor: 'cursor:1',
node: {
__typename: 'User',
id: 'node:1',
name: 'name:node:1',
...createFragmentRef('node:1', query),
},
},
{
cursor: 'cursor:2',
node: {
__typename: 'User',
id: 'node:2',
name: 'name:node:2',
...createFragmentRef('node:2', query),
},
},
],
pageInfo: {
endCursor: 'cursor:2',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:1',
},
},
};
expectFragmentResults([
{
data: expectedUser,
isPendingNext: true,
hasNext: true,
hasPrevious: false,
},
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
expect(callback).toBeCalledTimes(1);
});
});
describe('refetch', () => {
// The bulk of refetch behavior is covered in useRefetchableFragmentNode-test,
// so this suite covers the pagination-related test cases.
function expectRefetchRequestIsInFlight(expected) {
expect(environment.executeWithSource).toBeCalledTimes(
expected.requestCount,
);
expect(
environment.mock.isLoading(
expected.gqlRefetchQuery ?? gqlPaginationQuery,
expected.refetchVariables,
{force: true},
),
).toEqual(expected.inFlight);
}
function expectFragmentSuspendedOnRefetch(
renderer,
expected: {|
data: mixed,
hasNext: boolean,
hasPrevious: boolean,
refetchVariables: Variables,
refetchQuery?: OperationDescriptor,
gqlRefetchQuery?: $FlowFixMe,
|},
flushFallback: boolean = true,
) {
assertYieldsWereCleared();
TestRenderer.act(() => {
// Wrap in act to ensure passive effects are run
jest.runAllImmediates();
});
Scheduler.unstable_flushNumberOfYields(1);
const actualYields = Scheduler.unstable_clearYields();
if (flushFallback) {
// Flushing fallbacks by running a timer could cause other side-effects
// such as releasing retained queries. Until React has a better way to flush
// fallbacks, we can't test fallbacks and other timeout based effects at the same time.
jest.runOnlyPendingTimers(); // Trigger fallbacks.
// Assert component suspended
expect(actualYields.length).toEqual(1);
expect(actualYields[0]).toEqual('Fallback');
expect(renderer.toJSON()).toEqual('Fallback');
}
// Assert refetch query was fetched
expectRefetchRequestIsInFlight({
...expected,
inFlight: true,
requestCount: 1,
});
// Assert query is retained by loadQuery
// and tentatively retained while component is suspended
expect(environment.retain).toBeCalledTimes(2);
expect(environment.retain.mock.calls[0][0]).toEqual(
expected.refetchQuery,
);
}
it('loads more items correctly after refetching', () => {
const renderer = renderFragment();
expectFragmentResults([
{
data: initialUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
refetch({isViewerFriendLocal: true, orderby: ['lastname']});
// Assert that fragment is refetching with the right variables and
// suspends upon refetch
const refetchVariables = {
after: null,
first: 1,
before: null,
last: null,
id: '1',
isViewerFriendLocal: true,
orderby: ['lastname'],
};
paginationQuery = createOperationDescriptor(
gqlPaginationQuery,
refetchVariables,
{force: true},
);
expectFragmentSuspendedOnRefetch(
renderer,
{
data: initialUser,
hasNext: true,
hasPrevious: false,
refetchVariables,
refetchQuery: paginationQuery,
},
false,
);
// Mock network response
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:100',
node: {
__typename: 'User',
id: 'node:100',
name: 'name:node:100',
username: 'username:node:100',
},
},
],
pageInfo: {
endCursor: 'cursor:100',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:100',
},
},
},
},
});
// Assert fragment is rendered with new data
const expectedUser = {
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:100',
node: {
__typename: 'User',
id: 'node:100',
name: 'name:node:100',
...createFragmentRef('node:100', paginationQuery),
},
},
],
pageInfo: {
endCursor: 'cursor:100',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:100',
},
},
};
jest.runAllImmediates();
expectFragmentResults([
{
data: expectedUser,
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
// Assert refetch query was retained by loadQuery and component
expect(release).not.toBeCalled();
expect(environment.retain).toBeCalledTimes(2);
expect(environment.retain.mock.calls[0][0]).toEqual(paginationQuery);
// Paginate after refetching
environment.execute.mockClear();
loadNext(1);
const paginationVariables = {
id: '1',
after: 'cursor:100',
first: 1,
before: null,
last: null,
isViewerFriendLocal: true,
orderby: ['lastname'],
};
expectFragmentIsPendingOnPagination(renderer, 'forward', {
data: expectedUser,
hasNext: true,
hasPrevious: false,
paginationVariables,
});
environment.mock.resolve(gqlPaginationQuery, {
data: {
node: {
__typename: 'User',
id: '1',
name: 'Alice',
friends: {
edges: [
{
cursor: 'cursor:200',
node: {
__typename: 'User',
id: 'node:200',
name: 'name:node:200',
username: 'username:node:200',
},
},
],
pageInfo: {
startCursor: 'cursor:200',
endCursor: 'cursor:200',
hasNextPage: true,
hasPreviousPage: true,
},
},
},
},
});
const paginatedUser = {
...expectedUser,
friends: {
...expectedUser.friends,
edges: [
{
cursor: 'cursor:100',
node: {
__typename: 'User',
id: 'node:100',
name: 'name:node:100',
...createFragmentRef('node:100', paginationQuery),
},
},
{
cursor: 'cursor:200',
node: {
__typename: 'User',
id: 'node:200',
name: 'name:node:200',
...createFragmentRef('node:200', paginationQuery),
},
},
],
pageInfo: {
endCursor: 'cursor:200',
hasNextPage: true,
hasPreviousPage: false,
startCursor: 'cursor:100',
},
},
};
expectFragmentResults([
{
data: paginatedUser,
// Assert pending flag is set back to false
isPendingNext: false,
hasNext: true,
hasPrevious: false,
},
]);
});
});
}
});<|fim▁end|> | last: null,
isViewerFriendLocal: false,
orderby: ['name'], |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|># encoding: utf-8
"""
Admin interface for the sphinxdoc app.
<|fim▁hole|>
class ProjectAdmin(admin.ModelAdmin):
"""Admin interface for :class:`~sphinxdoc.models.Project`."""
list_display = ('name', 'path',)
prepopulated_fields = {'slug': ('name',)}
class DocumentAdmin(admin.ModelAdmin):
"""
Admin interface for :class:`~sphinxdoc.models.Document`.
Normally, you shouldn’t need this, since you create new documents via
the management command.
"""
pass
admin.site.register(Project, ProjectAdmin)
admin.site.register(Document, DocumentAdmin)<|fim▁end|> | """
from django.contrib import admin
from sphinxdoc.models import Project, Document |
<|file_name|>library.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Navneet Singh
# Copyright (c) 2015 Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import socket
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import loopingcall
from cinder import utils as cinder_utils
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(na_opts.netapp_basicauth_opts)
CONF.register_opts(na_opts.netapp_connection_opts)
CONF.register_opts(na_opts.netapp_eseries_opts)
CONF.register_opts(na_opts.netapp_transport_opts)
class NetAppESeriesLibrary(object):
"""Executes commands relating to Volumes."""
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_server_hostname', 'netapp_controller_ips',
'netapp_login', 'netapp_password',
'netapp_storage_pools']
SLEEP_SECS = 5
MAX_LUNS_PER_HOST = 255
HOST_TYPES = {'aix': 'AIX MPIO',
'avt': 'AVT_4M',
'factoryDefault': 'FactoryDefault',
'hpux': 'HP-UX TPGS',
'linux_atto': 'LnxTPGSALUA',
'linux_dm_mp': 'LnxALUA',
'linux_mpp_rdac': 'Linux',
'linux_pathmanager': 'LnxTPGSALUA_PM',
'macos': 'MacTPGSALUA',
'ontap': 'ONTAP',
'svc': 'SVC',
'solaris_v11': 'SolTPGSALUA',
'solaris_v10': 'Solaris',
'vmware': 'VmwTPGSALUA',
'windows':
'Windows 2000/Server 2003/Server 2008 Non-Clustered',
'windows_atto': 'WinTPGSALUA',
'windows_clustered':
'Windows 2000/Server 2003/Server 2008 Clustered'
}
# NOTE(ameade): This maps what is reported by the e-series api to a
# consistent set of values that are reported by all NetApp drivers
# to the cinder scheduler.
SSC_DISK_TYPE_MAPPING = {
'scsi': 'SCSI',
'fibre': 'FCAL',
'sas': 'SAS',
'sata': 'SATA',
}
SSC_UPDATE_INTERVAL = 60 # seconds
WORLDWIDENAME = 'worldWideName'
def __init__(self, driver_name, driver_protocol="iSCSI",
configuration=None, **kwargs):
self.configuration = configuration
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(
na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self.lookup_service = fczm_utils.create_lookup_service()
self._backend_name = self.configuration.safe_get(
"volume_backend_name") or "NetApp_ESeries"
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self._stats = {}
self._ssc_stats = {}
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
port = self.configuration.netapp_server_port
scheme = self.configuration.netapp_transport_type.lower()
if port is None:
if scheme == 'http':
port = 8080
elif scheme == 'https':
port = 8443
self._client = client.RestClient(
scheme=scheme,
host=self.configuration.netapp_server_hostname,
port=port,
service_path=self.configuration.netapp_webservice_path,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password)
self._check_mode_get_or_register_storage_system()
def _start_periodic_tasks(self):
ssc_periodic_task = loopingcall.FixedIntervalLoopingCall(
self._update_ssc_info)
ssc_periodic_task.start(interval=self.SSC_UPDATE_INTERVAL)
def check_for_setup_error(self):
self._check_host_type()
self._check_multipath()
self._check_storage_system()
self._start_periodic_tasks()
def _check_host_type(self):
self.host_type =\
self.HOST_TYPES.get(self.configuration.netapp_eseries_host_type,
None)
if not self.host_type:
raise exception.NetAppDriverException(
_('Configured host type is not supported.'))
def _check_multipath(self):
if not self.configuration.use_multipath_for_image_xfer:
msg = _LW('Production use of "%(backend)s" backend requires the '
'Cinder controller to have multipathing properly set up '
'and the configuration option "%(mpflag)s" to be set to '
'"True".') % {'backend': self._backend_name,
'mpflag': 'use_multipath_for_image_xfer'}
LOG.warning(msg)
def _check_mode_get_or_register_storage_system(self):
"""Does validity checks for storage system registry and health."""
def _resolve_host(host):
try:
ip = na_utils.resolve_hostname(host)
return ip
except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.')
% {'host': host, 'e': e})
raise exception.NoValidHost(
_("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e})
ips = self.configuration.netapp_controller_ips
ips = [i.strip() for i in ips.split(",")]
ips = [x for x in ips if _resolve_host(x)]
host = na_utils.resolve_hostname(
self.configuration.netapp_server_hostname)
if host in ips:
LOG.info(_LI('Embedded mode detected.'))
system = self._client.list_storage_systems()[0]
else:
LOG.info(_LI('Proxy mode detected.'))
system = self._client.register_storage_system(
ips, password=self.configuration.netapp_sa_password)
self._client.set_system_id(system.get('id'))
def _check_storage_system(self):
"""Checks whether system is registered and has good status."""
try:
system = self._client.list_storage_system()
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
msg = _LI("System with controller addresses [%s] is not"
" registered with web service.")
LOG.info(msg % self.configuration.netapp_controller_ips)
password_not_in_sync = False
if system.get('status', '').lower() == 'passwordoutofsync':
password_not_in_sync = True
new_pwd = self.configuration.netapp_sa_password
self._client.update_stored_system_password(new_pwd)
time.sleep(self.SLEEP_SECS)
sa_comm_timeout = 60
comm_time = 0
while True:
system = self._client.list_storage_system()
status = system.get('status', '').lower()
# wait if array not contacted or
# password was not in sync previously.
if ((status == 'nevercontacted') or
(password_not_in_sync and status == 'passwordoutofsync')):
LOG.info(_LI('Waiting for web service array communication.'))
time.sleep(self.SLEEP_SECS)
comm_time = comm_time + self.SLEEP_SECS
if comm_time >= sa_comm_timeout:
msg = _("Failure in communication between web service and"
" array. Waited %s seconds. Verify array"
" configuration parameters.")
raise exception.NetAppDriverException(msg %
sa_comm_timeout)
else:
break
msg_dict = {'id': system.get('id'), 'status': status}
if (status == 'passwordoutofsync' or status == 'notsupported' or
status == 'offline'):
msg = _("System %(id)s found with bad status - %(status)s.")
raise exception.NetAppDriverException(msg % msg_dict)
LOG.info(_LI("System %(id)s has %(status)s status.") % msg_dict)
return True<|fim▁hole|> return self._get_volume_with_label_wwn(label)
def _get_volume_with_label_wwn(self, label=None, wwn=None):
"""Searches volume with label or wwn or both."""
if not (label or wwn):
raise exception.InvalidInput(_('Either volume label or wwn'
' is required as input.'))
wwn = wwn.replace(':', '').upper() if wwn else None
eseries_volume = None
for vol in self._client.list_volumes():
if label and vol.get('label') != label:
continue
if wwn and vol.get(self.WORLDWIDENAME).upper() != wwn:
continue
eseries_volume = vol
break
if not eseries_volume:
raise KeyError()
return eseries_volume
def _get_snapshot_group_for_snapshot(self, snapshot_id):
label = utils.convert_uuid_to_es_fmt(snapshot_id)
for group in self._client.list_snapshot_groups():
if group['label'] == label:
return group
msg = _("Specified snapshot group with label %s could not be found.")
raise exception.NotFound(msg % label)
def _get_latest_image_in_snapshot_group(self, snapshot_id):
group = self._get_snapshot_group_for_snapshot(snapshot_id)
images = self._client.list_snapshot_images()
if images:
filtered_images = filter(lambda img: (img['pitGroupRef'] ==
group['pitGroupRef']),
images)
sorted_imgs = sorted(filtered_images, key=lambda x: x[
'pitTimestamp'])
return sorted_imgs[0]
msg = _("No snapshot image found in snapshot group %s.")
raise exception.NotFound(msg % group['label'])
def _is_volume_containing_snaps(self, label):
"""Checks if volume contains snapshot groups."""
vol_id = utils.convert_es_fmt_to_uuid(label)
for snap in self._client.list_snapshot_groups():
if snap['baseVolume'] == vol_id:
return True
return False
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
eseries_volume = self._get_volume(volume['name_id'])
storage_pool = self._client.get_storage_pool(
eseries_volume['volumeGroupRef'])
if storage_pool:
return storage_pool.get('label')
def create_volume(self, volume):
"""Creates a volume."""
LOG.debug('create_volume on %s' % volume['host'])
# get E-series pool label as pool name
eseries_pool_label = volume_utils.extract_host(volume['host'],
level='pool')
if eseries_pool_label is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
eseries_volume_label = utils.convert_uuid_to_es_fmt(volume['name_id'])
# get size of the requested volume creation
size_gb = int(volume['size'])
self._create_volume(eseries_pool_label,
eseries_volume_label,
size_gb)
def _create_volume(self, eseries_pool_label, eseries_volume_label,
size_gb):
"""Creates volume with given label and size."""
target_pool = None
pools = self._get_storage_pools()
for pool in pools:
if pool["label"] == eseries_pool_label:
target_pool = pool
break
if not target_pool:
msg = _("Pools %s does not exist")
raise exception.NetAppDriverException(msg % eseries_pool_label)
try:
vol = self._client.create_volume(target_pool['volumeGroupRef'],
eseries_volume_label, size_gb)
LOG.info(_LI("Created volume with "
"label %s."), eseries_volume_label)
except exception.NetAppDriverException as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating volume. Msg - %s."),
six.text_type(e))
return vol
def _schedule_and_create_volume(self, label, size_gb):
"""Creates volume with given label and size."""
avl_pools = self._get_sorted_available_storage_pools(size_gb)
for pool in avl_pools:
try:
vol = self._client.create_volume(pool['volumeGroupRef'],
label, size_gb)
LOG.info(_LI("Created volume with label %s."), label)
return vol
except exception.NetAppDriverException as e:
LOG.error(_LE("Error creating volume. Msg - %s."), e)
msg = _("Failure creating volume %s.")
raise exception.NetAppDriverException(msg % label)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
label = utils.convert_uuid_to_es_fmt(volume['id'])
size = volume['size']
dst_vol = self._schedule_and_create_volume(label, size)
try:
src_vol = None
src_vol = self._create_snapshot_volume(snapshot['id'])
self._copy_volume_high_prior_readonly(src_vol, dst_vol)
LOG.info(_LI("Created volume with label %s."), label)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
self._client.delete_volume(dst_vol['volumeRef'])
finally:
if src_vol:
try:
self._client.delete_snapshot_volume(src_vol['id'])
except exception.NetAppDriverException as e:
LOG.error(_LE("Failure deleting snap vol. Error: %s."), e)
else:
LOG.warning(_LW("Snapshot volume not found."))
def _create_snapshot_volume(self, snapshot_id):
"""Creates snapshot volume for given group with snapshot_id."""
group = self._get_snapshot_group_for_snapshot(snapshot_id)
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_latest_image_in_snapshot_group(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_available_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
group['baseVolume'], s_id)
def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
"""Copies src volume to dest volume."""
LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
% {'src': src_vol['label'], 'dst': dst_vol['label']})
try:
job = None
job = self._client.create_volume_copy_job(src_vol['id'],
dst_vol['volumeRef'])
while True:
j_st = self._client.list_vol_copy_job(job['volcopyRef'])
if (j_st['status'] == 'inProgress' or j_st['status'] ==
'pending' or j_st['status'] == 'unknown'):
time.sleep(self.SLEEP_SECS)
continue
if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
LOG.error(_LE("Vol copy job status %s."), j_st['status'])
msg = _("Vol copy job for dest %s failed.")\
% dst_vol['label']
raise exception.NetAppDriverException(msg)
LOG.info(_LI("Vol copy job completed for dest %s.")
% dst_vol['label'])
break
finally:
if job:
try:
self._client.delete_vol_copy_job(job['volcopyRef'])
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting "
"job %s."), job['volcopyRef'])
else:
LOG.warning(_LW('Volume copy job for src vol %s not found.'),
src_vol['id'])
LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
snapshot = {'id': uuid.uuid4(), 'volume_id': src_vref['id'],
'volume': src_vref}
self.create_snapshot(snapshot)
try:
self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
self.delete_snapshot(snapshot)
except exception.NetAppDriverException:
LOG.warning(_LW("Failure deleting temp snapshot %s."),
snapshot['id'])
def delete_volume(self, volume):
"""Deletes a volume."""
try:
vol = self._get_volume(volume['name_id'])
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException:
LOG.warning(_LI("Volume %s already deleted."), volume['id'])
return
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
os_vol = snapshot['volume']
vol = self._get_volume(os_vol['name_id'])
vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_available_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
snapshot_name, vol['volumeRef'], pools[0]['volumeGroupRef'])
snap_image = self._client.create_snapshot_image(
snap_grp['pitGroupRef'])
LOG.info(_LI("Created snap grp with label %s."), snapshot_name)
except exception.NetAppDriverException:
with excutils.save_and_reraise_exception():
if snap_image is None and snap_grp:
self.delete_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_grp = self._get_snapshot_group_for_snapshot(snapshot['id'])
except exception.NotFound:
LOG.warning(_LW("Snapshot %s already deleted."), snapshot['id'])
return
self._client.delete_snapshot_group(snap_grp['pitGroupRef'])
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a volume."""
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a volume."""
pass
def initialize_connection_iscsi(self, volume, connector):
"""Allow connection to connector and return connection info."""
initiator_name = connector['initiator']
eseries_vol = self._get_volume(volume['name_id'])
mapping = self._map_volume_to_host(eseries_vol, [initiator_name])
lun_id = mapping['lun']
msg = _("Mapped volume %(id)s to the initiator %(initiator_name)s.")
msg_fmt = {'id': volume['id'], 'initiator_name': initiator_name}
LOG.debug(msg % msg_fmt)
iscsi_details = self._get_iscsi_service_details()
iscsi_portal = self._get_iscsi_portal_for_vol(eseries_vol,
iscsi_details)
msg = _("Successfully fetched target details for volume %(id)s and "
"initiator %(initiator_name)s.")
LOG.debug(msg % msg_fmt)
iqn = iscsi_portal['iqn']
address = iscsi_portal['ip']
port = iscsi_portal['tcp_port']
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assigns the specified volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
eseries_vol = self._get_volume(volume['name_id'])
mapping = self._map_volume_to_host(eseries_vol, initiators)
lun_id = mapping['lun']
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
if target_wwpns:
msg = ("Successfully fetched target details for LUN %(id)s "
"and initiator(s) %(initiators)s.")
msg_fmt = {'id': volume['id'], 'initiators': initiators}
LOG.debug(msg, msg_fmt)
else:
msg = _('Failed to get LUN target details for the LUN %s.')
raise exception.VolumeBackendAPIException(data=msg % volume['id'])
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
eseries_vol = self._get_volume(volume['name_id'])
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
host = self._get_host_with_matching_port(initiators)
mappings = eseries_vol.get('listOfMappings', [])
# There can only be one or zero mappings on a volume in E-Series
mapping = mappings[0] if mappings else None
if not mapping:
msg = _("Mapping not found for %(vol)s to host %(ht)s.")
raise exception.NotFound(msg % {'vol': eseries_vol['volumeRef'],
'ht': host['hostRef']})
self._client.delete_volume_mapping(mapping['lunMappingRef'])
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if len(self._client.get_volume_mappings_for_host(
host['hostRef'])) == 0:
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map."))
initiator_info = self._build_initiator_target_map_fc(connector)
target_wwpns, initiator_target_map, num_paths = initiator_info
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map_fc(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._client.list_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
def _get_iscsi_service_details(self):
"""Gets iscsi iqn, ip and port information."""
ports = []
hw_inventory = self._client.list_hardware_inventory()
iscsi_ports = hw_inventory.get('iscsiPorts')
if iscsi_ports:
for port in iscsi_ports:
if (port.get('ipv4Enabled') and port.get('iqn') and
port.get('ipv4Data') and
port['ipv4Data'].get('ipv4AddressData') and
port['ipv4Data']['ipv4AddressData']
.get('ipv4Address') and port['ipv4Data']
['ipv4AddressData'].get('configState')
== 'configured'):
iscsi_det = {}
iscsi_det['ip'] =\
port['ipv4Data']['ipv4AddressData']['ipv4Address']
iscsi_det['iqn'] = port['iqn']
iscsi_det['tcp_port'] = port.get('tcpListenPort')
iscsi_det['controller'] = port.get('controllerId')
ports.append(iscsi_det)
if not ports:
msg = _('No good iscsi portals found for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
return ports
def _get_iscsi_portal_for_vol(self, volume, portals, anyController=True):
"""Get the iscsi portal info relevant to volume."""
for portal in portals:
if portal.get('controller') == volume.get('currentManager'):
return portal
if anyController and portals:
return portals[0]
msg = _('No good iscsi portal found in supplied list for %s.')
raise exception.NetAppDriverException(
msg % self._client.get_system_id())
@cinder_utils.synchronized('map_es_volume')
def _map_volume_to_host(self, vol, initiators):
"""Maps the e-series volume to host with initiator."""
host = self._get_or_create_host(initiators, self.host_type)
vol_maps = self._get_host_mapping_for_vol_frm_array(vol)
for vol_map in vol_maps:
if vol_map.get('mapRef') == host['hostRef']:
return vol_map
else:
self._client.delete_volume_mapping(vol_map['lunMappingRef'])
mappings = self._get_vol_mapping_for_host_frm_array(host['hostRef'])
lun = self._get_free_lun(host, mappings)
return self._client.create_volume_mapping(vol['volumeRef'],
host['hostRef'], lun)
def _get_or_create_host(self, port_ids, host_type):
"""Fetch or create a host by given port."""
try:
host = self._get_host_with_matching_port(port_ids)
ht_def = self._get_host_type_definition(host_type)
if host.get('hostTypeIndex') == ht_def.get('index'):
return host
else:
try:
return self._client.update_host_type(
host['hostRef'], ht_def)
except exception.NetAppDriverException as e:
msg = _LW("Unable to update host type for host with "
"label %(l)s. %(e)s")
LOG.warning(msg % {'l': host['label'], 'e': e.msg})
return host
except exception.NotFound as e:
LOG.warning(_LW("Message - %s."), e.msg)
return self._create_host(port_ids, host_type)
def _get_host_with_matching_port(self, port_ids):
"""Gets or creates a host with given port id."""
# Remove any extra colons
port_ids = [six.text_type(wwpn).replace(':', '')
for wwpn in port_ids]
hosts = self._client.list_hosts()
for port_id in port_ids:
for host in hosts:
if host.get('hostSidePorts'):
ports = host.get('hostSidePorts')
for port in ports:
address = port.get('address').upper().replace(':', '')
if address == port_id.upper():
return host
msg = _("Host with ports %(ports)s not found.")
raise exception.NotFound(msg % {'ports': port_ids})
def _create_host(self, port_ids, host_type):
"""Creates host on system with given initiator as port_id."""
LOG.info(_LI("Creating host with ports %s."), port_ids)
host_label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
host_type = self._get_host_type_definition(host_type)
port_type = self.driver_protocol.lower()
return self._client.create_host_with_ports(host_label,
host_type,
port_ids,
port_type=port_type)
def _get_host_type_definition(self, host_type):
"""Gets supported host type if available on storage system."""
host_types = self._client.list_host_types()
for ht in host_types:
if ht.get('name', 'unknown').lower() == host_type.lower():
return ht
raise exception.NotFound(_("Host type %s not supported.") % host_type)
def _get_free_lun(self, host, maps=None):
"""Gets free LUN for given host."""
ref = host['hostRef']
luns = maps or self._get_vol_mapping_for_host_frm_array(ref)
used_luns = set(map(lambda lun: int(lun['lun']), luns))
for lun in xrange(self.MAX_LUNS_PER_HOST):
if lun not in used_luns:
return lun
msg = _("No free LUNs. Host might exceeded max LUNs.")
raise exception.NetAppDriverException(msg)
def _get_vol_mapping_for_host_frm_array(self, host_ref):
"""Gets all volume mappings for given host from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('mapRef') == host_ref, mappings)
return host_maps
def _get_host_mapping_for_vol_frm_array(self, volume):
"""Gets all host mappings for given volume from array."""
mappings = self._client.get_volume_mappings() or []
host_maps = filter(lambda x: x.get('volumeRef') == volume['volumeRef'],
mappings)
return host_maps
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
eseries_vol = self._get_volume(volume['name_id'])
host = self._get_host_with_matching_port([connector['initiator']])
mapping = self._get_cached_vol_mapping_for_host(eseries_vol, host)
self._client.delete_volume_mapping(mapping['lunMappingRef'])
def _get_cached_vol_mapping_for_host(self, volume, host):
"""Gets cached volume mapping for given host."""
mappings = volume.get('listOfMappings') or []
for mapping in mappings:
if mapping.get('mapRef') == host['hostRef']:
return mapping
msg = _("Mapping not found for %(vol)s to host %(ht)s.")
raise exception.NotFound(msg % {'vol': volume['volumeRef'],
'ht': host['hostRef']})
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service."""
if refresh:
if not self._ssc_stats:
self._update_ssc_info()
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Update volume statistics."""
LOG.debug("Updating volume stats.")
data = dict()
data["volume_backend_name"] = self._backend_name
data["vendor_name"] = "NetApp"
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.driver_protocol
data["pools"] = []
for storage_pool in self._get_storage_pools():
cinder_pool = {}
cinder_pool["pool_name"] = storage_pool.get("label")
cinder_pool["QoS_support"] = False
cinder_pool["reserved_percentage"] = 0
tot_bytes = int(storage_pool.get("totalRaidedSpace", 0))
used_bytes = int(storage_pool.get("usedSpace", 0))
cinder_pool["free_capacity_gb"] = ((tot_bytes - used_bytes) /
units.Gi)
cinder_pool["total_capacity_gb"] = tot_bytes / units.Gi
pool_ssc_stats = self._ssc_stats.get(
storage_pool["volumeGroupRef"])
if pool_ssc_stats:
cinder_pool.update(pool_ssc_stats)
data["pools"].append(cinder_pool)
self._stats = data
self._garbage_collect_tmp_vols()
@cinder_utils.synchronized("netapp_update_ssc_info", external=False)
def _update_ssc_info(self):
"""Periodically runs to update ssc information from the backend.
The self._ssc_stats attribute is updated with the following format.
{<volume_group_ref> : {<ssc_key>: <ssc_value>}}
"""
LOG.info(_LI("Updating storage service catalog information for "
"backend '%s'") % self._backend_name)
self._ssc_stats = \
self._update_ssc_disk_encryption(self._get_storage_pools())
self._ssc_stats = \
self._update_ssc_disk_types(self._get_storage_pools())
def _update_ssc_disk_types(self, volume_groups):
"""Updates the given ssc dictionary with new disk type information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
all_disks = self._client.list_drives()
pool_ids = set(pool.get("volumeGroupRef") for pool in volume_groups)
relevant_disks = filter(lambda x: x.get('currentVolumeGroupRef') in
pool_ids, all_disks)
for drive in relevant_disks:
current_vol_group = drive.get('currentVolumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
if drive.get("driveMediaType") == 'ssd':
ssc_stats[current_vol_group]['netapp_disk_type'] = 'SSD'
else:
disk_type = drive.get('interfaceType').get('driveType')
ssc_stats[current_vol_group]['netapp_disk_type'] = \
self.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
return ssc_stats
def _update_ssc_disk_encryption(self, volume_groups):
"""Updates the given ssc dictionary with new disk encryption information.
:param volume_groups: The volume groups this driver cares about
"""
ssc_stats = copy.deepcopy(self._ssc_stats)
for pool in volume_groups:
current_vol_group = pool.get('volumeGroupRef')
if current_vol_group not in ssc_stats:
ssc_stats[current_vol_group] = {}
ssc_stats[current_vol_group]['netapp_disk_encryption'] = 'true' \
if pool['securityType'] == 'enabled' else 'false'
return ssc_stats
def _get_storage_pools(self):
conf_enabled_pools = []
for value in self.configuration.netapp_storage_pools.split(','):
if value:
conf_enabled_pools.append(value.strip().lower())
filtered_pools = []
storage_pools = self._client.list_storage_pools()
for storage_pool in storage_pools:
# Check if pool can be used
if (storage_pool.get('raidLevel') == 'raidDiskPool'
and storage_pool['label'].lower() in conf_enabled_pools):
filtered_pools.append(storage_pool)
return filtered_pools
def _get_sorted_available_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
size = size_gb * units.Gi
sorted_pools = sorted(self._get_storage_pools(), key=lambda x:
(int(x.get('totalRaidedSpace', 0))
- int(x.get('usedSpace', 0))), reverse=True)
avl_pools = filter(lambda x: ((int(x.get('totalRaidedSpace', 0)) -
int(x.get('usedSpace', 0)) >= size)),
sorted_pools)
if not avl_pools:
msg = _LW("No storage pool found with available capacity %s.")
LOG.warning(msg % size_gb)
return avl_pools
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
stage_1, stage_2 = 0, 0
src_vol = self._get_volume(volume['name_id'])
src_label = src_vol['label']
stage_label = 'tmp-%s' % utils.convert_uuid_to_es_fmt(uuid.uuid4())
extend_vol = {'id': uuid.uuid4(), 'size': new_size}
self.create_cloned_volume(extend_vol, volume)
new_vol = self._get_volume(extend_vol['id'])
try:
stage_1 = self._client.update_volume(src_vol['id'], stage_label)
stage_2 = self._client.update_volume(new_vol['id'], src_label)
new_vol = stage_2
LOG.info(_LI('Extended volume with label %s.'), src_label)
except exception.NetAppDriverException:
if stage_1 == 0:
with excutils.save_and_reraise_exception():
self._client.delete_volume(new_vol['id'])
if stage_2 == 0:
with excutils.save_and_reraise_exception():
self._client.update_volume(src_vol['id'], src_label)
self._client.delete_volume(new_vol['id'])
def _garbage_collect_tmp_vols(self):
"""Removes tmp vols with no snapshots."""
try:
if not na_utils.set_safe_attr(self, 'clean_job_running', True):
LOG.warning(_LW('Returning as clean tmp '
'vol job already running.'))
return
for vol in self._client.list_volumes():
label = vol['label']
if (label.startswith('tmp-') and
not self._is_volume_containing_snaps(label)):
try:
self._client.delete_volume(vol['volumeRef'])
except exception.NetAppDriverException as e:
LOG.debug("Error deleting vol with label %s: %s",
label, e)
finally:
na_utils.set_safe_attr(self, 'clean_job_running', False)
@cinder_utils.synchronized('manage_existing')
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management."""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
label = utils.convert_uuid_to_es_fmt(volume['id'])
if label == vol['label']:
LOG.info(_LI("Volume with given ref %s need not be renamed during"
" manage operation."), existing_ref)
managed_vol = vol
else:
managed_vol = self._client.update_volume(vol['id'], label)
LOG.info(_LI("Manage operation completed for volume with new label"
" %(label)s and wwn %(wwn)s."),
{'label': label, 'wwn': managed_vol[self.WORLDWIDENAME]})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
vol = self._get_existing_vol_with_manage_ref(volume, existing_ref)
return int(math.ceil(float(vol['capacity']) / units.Gi))
def _get_existing_vol_with_manage_ref(self, volume, existing_ref):
try:
return self._get_volume_with_label_wwn(
existing_ref.get('source-name'), existing_ref.get('source-id'))
except exception.InvalidInput:
reason = _('Reference must contain either source-name'
' or source-id element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
except KeyError:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_('Volume not found on configured storage pools.'))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. Logs a
message to indicate the volume is no longer under Cinder's control.
"""
managed_vol = self._get_volume(volume['id'])
LOG.info(_LI("Unmanaged volume with current label %(label)s and wwn "
"%(wwn)s."), {'label': managed_vol['label'],
'wwn': managed_vol[self.WORLDWIDENAME]})<|fim▁end|> |
def _get_volume(self, uid):
label = utils.convert_uuid_to_es_fmt(uid) |
<|file_name|>primitive.rs<|end_file_name|><|fim▁begin|>use core::*;
use walker::*;
macro_rules! unwrap_text {
($s:ident, $e:expr) => {{
match *$e.1 {
Epiq::Text(ref t) => t,
_ => {
let from = $s.printer_printed($e.0);
panic!("{}からtextは取り出せません", from);
},
}
}}
}
macro_rules! unwrap_nmbr {
($s:ident, $e:expr) => {{
match *$e.1 {
Epiq::Uit8(n) => n,
_ => {
let from = $s.printer_printed($e.0);
panic!("{}からnmbrは取り出せません", from);
},
}
}}
}
macro_rules! first_nmbr {
($s:ident, $e:expr) => {{
let first = $s.first($e.clone());
unwrap_nmbr!($s, first)
}}
}
macro_rules! second_nmbr {
($s:ident, $e:expr) => {{
let first = $s.second($e.clone());
unwrap_nmbr!($s, first)
}}
}
impl Walker {
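    // Calling convention (descriptive note): every primitive receives its
    // operands as a piq list; `first`/`second` at the bottom of this impl read
    // the list's p-value and the p-value of its q-tail, so e.g. `plus` pulls
    // two Uit8 numbers and allocates a new Uit8 node holding their sum.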
pub fn eq_nmbr(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let n1 = first_nmbr!(self, args);
let n2 = second_nmbr!(self, args);
let new_epiq = if n1 == n2 { Epiq::Tval } else { Epiq::Fval };
alloc_node!(self, new_epiq)
}
pub fn eq_text(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let t1 = self.first(args.clone());
let text1 = unwrap_text!(self, t1);
let t2 = self.second(args.clone());
let text2 = unwrap_text!(self, t2);
let new_epiq = if text1 == text2 { Epiq::Tval } else { Epiq::Fval };
alloc_node!(self, new_epiq)
}
pub fn eq_name(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let t1 = self.first(args.clone());
let name1 = unwrap_name!(self, t1);
let t2 = self.second(args.clone());
let name2 = unwrap_name!(self, t2);
let new_epiq = if name1 == name2 { Epiq::Tval } else { Epiq::Fval };
alloc_node!(self, new_epiq)
}
pub fn print(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let t = self.first(args.clone());
let text = unwrap_text!(self, t);
print!("{}", text);
self.get_epiq(UNIT_INDX)
}
pub fn decrement(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let n = first_nmbr!(self, args);
alloc_node!(self, Epiq::Uit8(n - 1))
}
pub fn plus(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let n1 = first_nmbr!(self, args);
let n2 = second_nmbr!(self, args);
alloc_node!(self, Epiq::Uit8(n1 + n2))
}
pub fn minus(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let n1 = first_nmbr!(self, args);
let n2 = second_nmbr!(self, args);
alloc_node!(self, Epiq::Uit8(n1 - n2))
}
pub fn le_or_eq_nmbr(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let n1 = first_nmbr!(self, args);
let n2 = second_nmbr!(self, args);
let new_epiq = if n1 <= n2 { Epiq::Tval } else { Epiq::Fval };
alloc_node!(self, new_epiq)
}
pub fn concat(&self, args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
self.concat_internal(args, "")
}
fn concat_internal(&self, args: Node<Rc<Epiq>>, accum: &str) -> Node<Rc<Epiq>> {
let t = self.first(args.clone());
let text = match *t.1 {
Epiq::Text(ref tt) => tt.clone(),
Epiq::Name(ref tt) => tt.clone(),
Epiq::Uit8(n) => n.to_string(),
_ => {
let from = self.printer_printed(t.0);
panic!("{}からtextは取り出せません", from);<|fim▁hole|> },
};
let accuming = accum.to_string() + &text;
match *self.qval(args.clone()).1 {
Epiq::Unit => alloc_node!(self, Epiq::Text(accuming)),
_ => self.concat_internal(self.qval(args.clone()), &accuming),
}
}
    pub fn dbqt(&self, _args: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
alloc_node!(self, Epiq::Text("\"".to_string()))
}
fn first(&self, piq: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
self.pval(piq.clone())
}
fn second(&self, piq: Node<Rc<Epiq>>) -> Node<Rc<Epiq>> {
let second_lpiq = self.qval(piq);
self.pval(second_lpiq)
}
}<|fim▁end|> | |
<|file_name|>SeattleExpressLanesView.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2014 Washington State Department of Transportation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the<|fim▁hole|> * along with this program. If not, see <http://www.gnu.org/licenses/>
*
*/
package gov.wa.wsdot.mobile.client.activities.trafficmap.expresslanes;
import gov.wa.wsdot.mobile.shared.ExpressLaneItem;
import java.util.List;
import com.google.gwt.user.client.ui.IsWidget;
import com.googlecode.mgwt.ui.client.widget.base.HasRefresh;
import com.googlecode.mgwt.ui.client.widget.panel.pull.PullArrowWidget;
import com.googlecode.mgwt.ui.client.widget.panel.pull.PullPanel.Pullhandler;
public interface SeattleExpressLanesView extends IsWidget {
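    /*
     * MVP view contract (descriptive note): the owning activity supplies a
     * Presenter via setPresenter() and pushes parsed express-lane statuses
     * through render(); the pull-to-refresh plumbing is exposed through the
     * getPullHeader()/getPullPanel()/setHeaderPullHandler() accessors below.
     */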
public void setPresenter(Presenter presenter);
public interface Presenter {
public void onDoneButtonPressed();
}
public void render(List<ExpressLaneItem> createPostList);
public void showProgressIndicator();
public void hideProgressIndicator();
public void refresh();
public void setHeaderPullHandler(Pullhandler pullHandler);
public PullArrowWidget getPullHeader();
public HasRefresh getPullPanel();
}<|fim▁end|> | * GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License |
<|file_name|>ConverterFactoryLoader.java<|end_file_name|><|fim▁begin|>package kikaha.urouting;
import java.util.*;
import java.util.function.Function;
import javax.annotation.PostConstruct;
import javax.enterprise.inject.Produces;
import javax.enterprise.inject.*;
import javax.inject.*;
import kikaha.core.util.Lang;
import kikaha.urouting.api.*;
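// Resolution sketch (assumptions noted): CDI-discovered AbstractConverter beans
// are indexed by the canonical name of their generic target type, layered on top
// of the built-in primitive converters defined below. A hypothetical
// LocalDateConverter extending AbstractConverter<LocalDate> would be picked up
// automatically once it is visible to the container.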
@Singleton
@SuppressWarnings("rawtypes")
public class ConverterFactoryLoader {
@Inject
@Typed( AbstractConverter.class )
Iterable<AbstractConverter> availableConverters;
ConverterFactory factory;
@PostConstruct
public void onStartup() {
factory = new ConverterFactory( loadAllConverters() );
}
@Produces
public ConverterFactory produceFactory(){
return factory;
}
public Map<String, AbstractConverter<?>> loadAllConverters() {
final Map<String, AbstractConverter<?>> converters = loadPrimitiveConverters();
for ( final AbstractConverter converter : availableConverters ){
final String canonicalName = converter.getGenericClass().getCanonicalName();
converters.put(canonicalName, converter);
}
return converters;
}
static private Map<String, AbstractConverter<?>> loadPrimitiveConverters(){
final Map<String, AbstractConverter<?>> primitiveConverters = new HashMap<>();
converterFrom( primitiveConverters, int.class, 0, Integer::parseInt );
converterFrom( primitiveConverters, byte.class, (byte)0, Byte::parseByte );
converterFrom( primitiveConverters, float.class, 0f, Float::parseFloat );
converterFrom( primitiveConverters, double.class, 0.0, Double::parseDouble );
converterFrom( primitiveConverters, long.class, 0L, Long::parseLong );
converterFrom( primitiveConverters, short.class, (short)0, Short::parseShort );
converterFrom( primitiveConverters, boolean.class, Boolean.FALSE, Boolean::parseBoolean );
return primitiveConverters;
}
static private <T> void converterFrom(
Map<String, AbstractConverter<?>> primitiveConverters,
Class<T> primitiveType, T defaultValue, Function<String, T> converter)
{
primitiveConverters.put(
primitiveType.getCanonicalName(),
new AbstractConverter<T>() {
@Override
public T convert(String value) throws ConversionException {
if (Lang.isUndefined(value))
return defaultValue;
return converter.apply(value);
}
@Override
public Class<T> getGenericClass() { return primitiveType; }
}<|fim▁hole|><|fim▁end|> | );
}
} |
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.db import models
import datetime
def get_choices(lst):
return [(i, i) for i in lst]
#
# Person
#
pprint_pan = lambda pan: "%s %s %s" % (pan[:5], pan[5:9], pan[9:])
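# e.g. pprint_pan("ABCDE1234F") -> "ABCDE 1234 F" (illustrative value only; the
# helper just regroups the 10-character PAN as 5-4-1).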
class Person(models.Model):
name = models.CharField(max_length=255, db_index=True)
fathers_name = models.CharField(max_length=255, null=True, blank=True, db_index=True)
status = models.CharField(max_length=32, choices=get_choices([
'Individual',
'HUF',
'Partnership Firm',
'Domestic Company',
'LLP',
'Trust(ITR 7)',
    ]), default='Individual')
employer = models.CharField(max_length=64, null=True, blank=True)
self_occupied = models.BooleanField()
pan_number = models.CharField(max_length=32, unique=True)
user_id = models.CharField(max_length=32, null=True, blank=True)
password = models.CharField(max_length=32, null=True, blank=True)
bank_name = models.CharField(max_length=255, null=True, blank=True)
bank_branch = models.CharField(max_length=255, null=True, blank=True)
account_number = models.CharField(max_length=32, null=True, blank=True)
micr = models.CharField(max_length=32, blank=True, null=True)
ifsc_code = models.CharField(max_length=32, null=True, blank=True)
account_type = models.CharField(max_length=32, choices=get_choices(['SB', 'CA', 'CC']), default='SB')
contact_number = models.CharField(max_length=13, null=True, blank=True, db_index=True)
email = models.EmailField(null=True, blank=True, db_index=True)
address = models.TextField(max_length=32, null=True, blank=True)
city = models.CharField(max_length=64, null=True, blank=True, db_index=True)
pincode = models.CharField(max_length=10, null=True, blank=True, db_index=True)
date_of_birth_or_incarnation = models.DateField(null=True, blank=True)<|fim▁hole|> def pan_number_pprint(self):
return pprint_pan(self.pan_number)
pan_number_pprint.admin_order_field = 'pan_number_pprint'
pan_number_pprint.short_description = 'Pan Number'
def _trim(self, *args):
for field in args:
value = getattr(self, field)
setattr(self, field, value.replace(' ', ''))
def save(self):
self._trim('pan_number')
super(Person, self).save()
def __unicode__(self):
return u'%s (%s)' % (self.name, self.pan_number)
class MetadataPerson(models.Model):
person = models.ForeignKey(Person)
key = models.CharField(max_length=250)
value = models.CharField(max_length=250)
#
# Report
#
class Report(models.Model):
finanyr = lambda yr: "%s - %s" % (yr, yr+1)
years = [(finanyr(i), finanyr(i)) for i in xrange(1980, 2020)]
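    # e.g. finanyr(2014) -> "2014 - 2015"; `years` therefore holds choice pairs
    # from "1980 - 1981" up to "2019 - 2020".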
person = models.ForeignKey(Person)
financial_year = models.CharField(max_length=11, choices=years, default=finanyr(datetime.datetime.now().year - 1))
assessment_year = models.CharField(max_length=11, choices=years, default=finanyr(datetime.datetime.now().year))
return_filed_on = models.DateField()
returned_income = models.DecimalField(max_digits=12, decimal_places=2)
#Advanced Tax
july = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
september = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
december = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
march = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
#Interest Detail
interest_234_a = models.DecimalField("Interest 234(a)", max_digits=12, decimal_places=2, null=True, blank=True)
interest_234_b = models.DecimalField("Interest 234(b)", max_digits=12, decimal_places=2, null=True, blank=True)
interest_234_c = models.DecimalField("Interest 234(c)", max_digits=12, decimal_places=2, null=True, blank=True)
#Tax detail
tds = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
self_assessment_tax = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
acknowledgement_number = models.CharField("Ack no.", max_length=64, null=True, blank=True)
#Bill Detail
bill_raised_on = models.DateField(null=True, blank=True)
bill_amount = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
bill_received = models.BooleanField("Bill received ?")
mode_of_payment = models.CharField(max_length=16, choices=get_choices(['Cash', 'Cheque', 'DD', 'Bank Transfer']), null=True, blank=True)
payment_detail = models.CharField(max_length=16, null=True, blank=True)
#Order 143(1)
order_received_on_143_1 = models.DateField("143(1) Order received on", null=True, blank=True)
assessed_income_143_1 = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_143_1 = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
refund_amount_143_1 = models.DecimalField("Refund amount", max_digits=12, decimal_places=2, null=True, blank=True)
demand_raised_amount_143_1 = models.DecimalField("Demand raised for ", max_digits=12, decimal_places=2, null=True, blank=True)
refund_received_on_143_1 = models.DateField("Refund received on", null=True, blank=True)
#Order 143(2)
order_received_on_143_2 = models.DateField("Notice received on", null=True, blank=True)
#Order 143(3)
order_received_on_143_3 = models.DateField("Order received on", null=True, blank=True)
assessed_income_143_3 = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_143_3 = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
refund_amount_143_3 = models.DecimalField("Refund amount", max_digits=12, decimal_places=2, null=True, blank=True)
demand_raised_amount_143_3 = models.DecimalField("Demand raised for", max_digits=12, decimal_places=2, null=True, blank=True)
refund_received_on_143_3 = models.DateField("Refund received on", null=True, blank=True)
#Appeal before cit
filed_on_cit = models.DateField("Filed on", null=True, blank=True)
order_received_on_cit = models.DateField("Order received on", null=True, blank=True)
assessed_income_cit = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_cit = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
#Appeal before tribunal
filed_on_tribunal = models.DateField("Filed on", null=True, blank=True)
order_received_on_tribunal = models.DateField("Order received on", null=True, blank=True)
filed_by_tribunal = models.CharField("Filed by", max_length=16, choices=get_choices(['assessee', 'department']), null=True, blank=True)
assessed_income_tribunal = models.DecimalField("Assessed income", max_digits=12, decimal_places=2, null=True, blank=True)
assessed_tax_tribunal = models.DecimalField("Assessed tax", max_digits=12, decimal_places=2, null=True, blank=True)
def got_reimbursement(self):
return self.refund_amount_143_1 > 0
got_reimbursement.admin_order_field = 'got_reimbursement'
got_reimbursement.boolean = True
got_reimbursement.short_description = 'Got reimbursement ?'
def tax_paid(self):
tax = sum([i for i in (self.march, self.september, self.december, self.july) if i is not None])
if tax == 0 and self.tds is not None:
tax = self.tds
return tax
tax_paid.admin_order_field = 'tax_paid'
tax_paid.boolean = False
tax_paid.short_description = 'Tax Paid'
class Meta:
unique_together = ('person', 'financial_year')
def __unicode__(self):
return u'%s - %s' % (self.person, self.financial_year)
class MetadataReport(models.Model):
report = models.ForeignKey(Report)
key = models.CharField(max_length=250)
value = models.CharField(max_length=250)<|fim▁end|> | |
<|file_name|>tk.py<|end_file_name|><|fim▁begin|># Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tk user interface implementation for namebench."""
__author__ = '[email protected] (Thomas Stromberg)'
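# Threading note (descriptive sketch of the pattern below): worker threads never
# touch Tk widgets directly. They enqueue StatusMessage objects on the global
# queue via AddMsg() and fire a '<<msg>>' virtual event; the main thread's
# MessageHandler drains the queue and updates the widgets.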
import datetime
import os
import Queue
import sys
import threading
import tkFont
# Wildcard imports are evil.
from Tkinter import *
import tkMessageBox
import traceback
import addr_util
import base_ui
import conn_quality
import nameserver_list
import sys_nameservers
import util
THREAD_UNSAFE_TK = 0
LOG_FILE_PATH = util.GenerateOutputFilename('log')
def closedWindowHandler():
print 'Au revoir, mes amis!'
sys.exit(1)
global_message_queue = Queue.Queue()
global_last_message = None
def AddMsg(message, master=None, backup_notifier=None, **kwargs):
"""Add a message to the global queue for output."""
global global_message_queue
global global_last_message
global THREAD_UNSAFE_TK
new_message = StatusMessage(message, **kwargs)
if new_message != global_last_message:
global_message_queue.put(new_message)
if master:
try:
master.event_generate('<<msg>>', when='tail')
global_last_message = new_message
# Tk thread-safety workaround #1
except TclError:
# If we aren't thread safe, we already assume this won't work.
if not THREAD_UNSAFE_TK:
print 'First TCL Error:'
traceback.print_exc()
try:
backup_notifier(-1)
THREAD_UNSAFE_TK = 1
except:
print 'Backup notifier failure:'
traceback.print_exc()
class StatusMessage(object):
"""Messages to be passed from to the main thread from children.
Used to avoid thread issues inherent with Tk.
"""
def __init__(self, message, error=False, count=False, total=False,
enable_button=None, debug=False):
self.message = message
self.error = error
self.count = count
self.debug = debug
self.total = total
self.enable_button = enable_button
class WorkerThread(threading.Thread, base_ui.BaseUI):
"""Handle benchmarking and preparation in a separate UI thread."""
def __init__(self, supplied_ns, global_ns, regional_ns, options, data_source=None, master=None,
backup_notifier=None):
threading.Thread.__init__(self)
self.SetupDataStructures()
self.status_callback = self.msg
self.data_src = data_source
self.backup_notifier = backup_notifier
self.include_internal = False
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns<|fim▁hole|> def msg(self, message, **kwargs):
"""Add messages to the main queue."""
return AddMsg(message, master=self.master, backup_notifier=self.backup_notifier, **kwargs)
def run(self):
self.msg('Started thread', enable_button=False)
try:
self.PrepareTestRecords()
self.PrepareNameServers()
self.PrepareBenchmark()
self.RunAndOpenReports()
except nameserver_list.OutgoingUdpInterception:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Outgoing requests were intercepted!', error=exception)
except nameserver_list.TooFewNameservers:
(exc_type, exception, tb) = sys.exc_info()
self.msg('Too few nameservers to test', error=exception)
except conn_quality.OfflineConnection:
(exc_type, exception, tb) = sys.exc_info()
self.msg('The connection appears to be offline!', error=exception)
except:
(exc_type, exception, tb) = sys.exc_info()
traceback.print_exc(tb)
error_msg = '\n'.join(traceback.format_tb(tb)[-4:])
self.msg(exception, error=error_msg)
self.msg(None, enable_button=True)
class NameBenchGui(object):
"""The main GUI."""
def __init__(self, options, supplied_ns, global_ns, regional_ns, version=None):
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
def Execute(self):
self.root = Tk()
app = MainWindow(self.root, self.options, self.supplied_ns, self.global_ns,
self.regional_ns, self.version)
app.DrawWindow()
self.root.bind('<<msg>>', app.MessageHandler)
self.root.mainloop()
class MainWindow(Frame, base_ui.BaseUI):
"""The main Tk GUI class."""
def __init__(self, master, options, supplied_ns, global_ns, regional_ns, version=None):
"""TODO(tstromberg): Remove duplication from NameBenchGui class."""
Frame.__init__(self)
self.SetupDataStructures()
self.master = master
self.options = options
self.supplied_ns = supplied_ns
self.global_ns = global_ns
self.regional_ns = regional_ns
self.version = version
try:
self.log_file = open(LOG_FILE_PATH, 'w')
except:
print 'Failed to open %s for write' % LOG_FILE_PATH
self.master.protocol('WM_DELETE_WINDOW', closedWindowHandler)
def UpdateStatus(self, message, count=None, total=None, error=None, debug=False):
"""Update our little status window."""
if not message:
return None
if total:
state = '%s... [%s/%s]' % (message, count, total)
elif count:
state = '%s%s' % (message, '.' * count)
else:
state = message
print '> %s' % str(state)
try:
self.log_file.write('%s: %s\r\n' % (datetime.datetime.now(), state))
self.log_file.flush()
except:
pass
if not debug:
self.status.set(state[0:75])
def DrawWindow(self):
"""Draws the user interface."""
self.nameserver_form = StringVar()
self.status = StringVar()
self.query_count = IntVar()
self.data_source = StringVar()
self.health_performance = StringVar()
self.location = StringVar()
self.use_global = IntVar()
self.use_regional = IntVar()
self.use_censor_checks = IntVar()
self.share_results = IntVar()
self.master.title('namebench')
outer_frame = Frame(self.master)
outer_frame.grid(row=0, padx=16, pady=16)
inner_frame = Frame(outer_frame, relief=GROOVE, bd=2, padx=12, pady=12)
inner_frame.grid(row=0, columnspan=2)
status = Label(outer_frame, text='...', textvariable=self.status)
status.grid(row=15, sticky=W, column=0)
if sys.platform[:3] == 'win':
seperator_width = 490
else:
seperator_width = 585
bold_font = tkFont.Font(font=status['font'])
bold_font['weight'] = 'bold'
ns_label = Label(inner_frame, text='Nameservers')
ns_label.grid(row=0, columnspan=2, sticky=W)
ns_label['font'] = bold_font
nameservers = Entry(inner_frame, bg='white',
textvariable=self.nameserver_form,
width=80)
nameservers.grid(row=1, columnspan=2, sticky=W, padx=4, pady=2)
self.nameserver_form.set(', '.join(nameserver_list.InternalNameServers()))
global_button = Checkbutton(inner_frame,
text='Include global DNS providers (Google Public DNS, OpenDNS, UltraDNS, etc.)',
variable=self.use_global)
global_button.grid(row=2, columnspan=2, sticky=W)
global_button.toggle()
regional_button = Checkbutton(inner_frame,
text='Include best available regional DNS services',
variable=self.use_regional)
regional_button.grid(row=3, columnspan=2, sticky=W)
regional_button.toggle()
separator = Frame(inner_frame, height=2, width=seperator_width, bd=1, relief=SUNKEN)
separator.grid(row=4, padx=5, pady=5, columnspan=2)
ds_label = Label(inner_frame, text='Options')
ds_label.grid(row=5, column=0, sticky=W)
ds_label['font'] = bold_font
censorship_button = Checkbutton(inner_frame, text='Include censorship checks',
variable=self.use_censor_checks)
censorship_button.grid(row=6, columnspan=2, sticky=W)
share_button = Checkbutton(inner_frame,
text='Upload and share your anonymized results (help speed up the internet!)',
variable=self.share_results)
# Old versions of Tk do not support two-dimensional padding.
try:
share_button.grid(row=7, columnspan=2, sticky=W, pady=[0,10])
except TclError:
share_button.grid(row=7, columnspan=2, sticky=W)
loc_label = Label(inner_frame, text='Your location')
loc_label.grid(row=10, column=0, sticky=W)
loc_label['font'] = bold_font
run_count_label = Label(inner_frame, text='Health Check Performance')
run_count_label.grid(row=10, column=1, sticky=W)
run_count_label['font'] = bold_font
self.DiscoverLocation()
self.LoadDataSources()
source_titles = self.data_src.ListSourceTitles()
left_dropdown_width = max([len(x) for x in source_titles]) - 3
location_choices = [self.country, '(Other)']
location = OptionMenu(inner_frame, self.location, *location_choices)
location.configure(width=left_dropdown_width)
location.grid(row=11, column=0, sticky=W)
self.location.set(location_choices[0])
mode_choices = ['Fast', 'Slow (unstable network)']
right_dropdown_width = max([len(x) for x in mode_choices]) - 3
health_performance = OptionMenu(inner_frame, self.health_performance, *mode_choices)
health_performance.configure(width=right_dropdown_width)
health_performance.grid(row=11, column=1, sticky=W)
self.health_performance.set(mode_choices[0])
ds_label = Label(inner_frame, text='Query Data Source')
ds_label.grid(row=12, column=0, sticky=W)
ds_label['font'] = bold_font
numqueries_label = Label(inner_frame, text='Number of queries')
numqueries_label.grid(row=12, column=1, sticky=W)
numqueries_label['font'] = bold_font
data_source = OptionMenu(inner_frame, self.data_source, *source_titles)
data_source.configure(width=left_dropdown_width)
data_source.grid(row=13, column=0, sticky=W)
self.data_source.set(source_titles[0])
query_count = Entry(inner_frame, bg='white', textvariable=self.query_count)
query_count.grid(row=13, column=1, sticky=W, padx=4)
query_count.configure(width=right_dropdown_width + 6)
self.query_count.set(self.options.query_count)
self.button = Button(outer_frame, command=self.StartJob)
self.button.grid(row=15, sticky=E, column=1, pady=4, padx=1)
self.UpdateRunState(running=True)
self.UpdateRunState(running=False)
self.UpdateStatus('namebench %s is ready!' % self.version)
def MessageHandler(self, unused_event):
"""Pinged when there is a new message in our queue to handle."""
while global_message_queue.qsize():
m = global_message_queue.get()
if m.error:
self.ErrorPopup(m.message, m.error)
elif m.enable_button == False:
self.UpdateRunState(running=True)
elif m.enable_button == True:
self.UpdateRunState(running=False)
self.UpdateStatus(m.message, count=m.count, total=m.total, error=m.error, debug=m.debug)
def ErrorPopup(self, title, message):
print 'Showing popup: %s' % title
tkMessageBox.showerror(str(title), str(message), master=self.master)
def UpdateRunState(self, running=True):
"""Update the run state of the window, using nasty threading hacks."""
global THREAD_UNSAFE_TK
# try/except blocks added to work around broken Tcl/Tk libraries
# shipped with Fedora 11 (not thread-safe).
# See http://code.google.com/p/namebench/issues/detail?id=23'
if THREAD_UNSAFE_TK:
return
if running:
try:
self.button.config(state=DISABLED)
self.button.config(text='Running')
except TclError:
THREAD_UNSAFE_TK = True
self.UpdateStatus('Unable to disable button due to broken Tk library')
self.UpdateStatus('Running...')
else:
try:
self.button.config(state=NORMAL)
self.button.config(text='Start Benchmark')
except TclError:
pass
def StartJob(self):
"""Events that get called when the Start button is pressed."""
self.ProcessForm()
thread = WorkerThread(self.supplied_ns, self.global_ns, self.regional_ns, self.options,
data_source=self.data_src,
master=self.master, backup_notifier=self.MessageHandler)
thread.start()
def ProcessForm(self):
"""Read form and populate instance variables."""
self.supplied_ns = addr_util.ExtractIPTuplesFromString(self.nameserver_form.get())
if not self.use_global.get():
self.global_ns = []
if not self.use_regional.get():
self.regional_ns = []
if 'Slow' in self.health_performance.get():
self.options.health_thread_count = 10
self.options.query_count = self.query_count.get()
self.options.input_source = self.data_src.ConvertSourceTitleToType(self.data_source.get())
self.options.enable_censorship_checks = self.use_censor_checks.get()
self.options.upload_results = self.share_results.get()<|fim▁end|> | self.master = master
self.options = options
self.resource_dir = os.path.dirname(os.path.dirname(__file__))
|
<|file_name|>segmentation_smoothing.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2021 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {GPGPUProgram, MathBackendWebGL} from '@tensorflow/tfjs-backend-webgl';
import * as tf from '@tensorflow/tfjs-core';
import {SegmentationSmoothingConfig} from './interfaces/config_interfaces';
/**
* A calculator for mixing two segmentation masks together, based on an
* uncertantity probability estimate.
* @param prevMaks Segmentation mask from previous image.
* @param newMask Segmentation mask of current image.
* @param config Contains ratio of amount of previous mask to blend with
* current.
*
* @returns Image mask.
*/
// ref:
// https://github.com/google/mediapipe/blob/master/mediapipe/calculators/image/segmentation_smoothing_calculator.cc
export function smoothSegmentation(
prevMask: tf.Tensor2D, newMask: tf.Tensor2D,
config: SegmentationSmoothingConfig): tf.Tensor2D {
if (tf.getBackend() === 'webgl') {
// Same as implementation in the else case but reduces number of shader
// calls to 1 instead of 17.
return smoothSegmentationWebGL(prevMask, newMask, config);
}
return tf.tidy(() => {
/*
* Assume p := newMaskValue
* H(p) := 1 + (p * log(p) + (1-p) * log(1-p)) / log(2)
* uncertainty alpha(p) =
* Clamp(1 - (1 - H(p)) * (1 - H(p)), 0, 1) [squaring the
* uncertainty]
*
* The following polynomial approximates uncertainty alpha as a
* function of (p + 0.5):
*/
const c1 = 5.68842;
const c2 = -0.748699;
const c3 = -57.8051;
const c4 = 291.309;
const c5 = -624.717;
const t = tf.sub(newMask, 0.5);
const x = tf.square(t);
// Per element calculation is: 1.0 - Math.min(1.0, x * (c1 + x * (c2 + x
// * (c3 + x * (c4 + x * c5))))).
const uncertainty = tf.sub(
1,
tf.minimum(
1,
tf.mul(
x,
tf.add(
c1,
tf.mul(
x,
tf.add(
c2,
tf.mul(
x,
tf.add(
c3,
tf.mul(
x, tf.add(c4, tf.mul(x, c5)))))))))));
// Per element calculation is: newMaskValue + (prevMaskValue -
// newMaskValue) * (uncertainty * combineWithPreviousRatio).
return tf.add(
newMask,
tf.mul(<|fim▁hole|>}
function smoothSegmentationWebGL(
prevMask: tf.Tensor2D, newMask: tf.Tensor2D,
config: SegmentationSmoothingConfig): tf.Tensor2D {
const ratio = config.combineWithPreviousRatio.toFixed(2);
const program: GPGPUProgram = {
variableNames: ['prevMask', 'newMask'],
outputShape: prevMask.shape,
userCode: `
void main() {
ivec2 coords = getOutputCoords();
int height = coords[0];
int width = coords[1];
float prevMaskValue = getPrevMask(height, width);
float newMaskValue = getNewMask(height, width);
/*
* Assume p := newMaskValue
* H(p) := 1 + (p * log(p) + (1-p) * log(1-p)) / log(2)
* uncertainty alpha(p) =
* Clamp(1 - (1 - H(p)) * (1 - H(p)), 0, 1) [squaring the
* uncertainty]
*
* The following polynomial approximates uncertainty alpha as a
* function of (p + 0.5):
*/
const float c1 = 5.68842;
const float c2 = -0.748699;
const float c3 = -57.8051;
const float c4 = 291.309;
const float c5 = -624.717;
float t = newMaskValue - 0.5;
float x = t * t;
float uncertainty =
1.0 - min(1.0, x * (c1 + x * (c2 + x * (c3 + x * (c4 + x * c5)))));
float outputValue = newMaskValue + (prevMaskValue - newMaskValue) *
(uncertainty * ${ratio});
setOutput(outputValue);
}
`
};
const webglBackend = tf.backend() as MathBackendWebGL;
return tf.tidy(() => {
const outputTensorInfo =
webglBackend.compileAndRun(program, [prevMask, newMask]);
return tf.engine().makeTensorFromDataId(
outputTensorInfo.dataId, outputTensorInfo.shape,
outputTensorInfo.dtype) as tf.Tensor2D;
});
}<|fim▁end|> | tf.sub(prevMask, newMask),
tf.mul(uncertainty, config.combineWithPreviousRatio)));
}); |
<|file_name|>DictionaryWriter.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.inputmethod.latin;
import android.content.Context;
import com.android.inputmethod.keyboard.ProximityInfo;
import com.android.inputmethod.latin.SuggestedWords.SuggestedWordInfo;
import com.android.inputmethod.latin.makedict.DictEncoder;
import com.android.inputmethod.latin.makedict.FormatSpec;
import com.android.inputmethod.latin.makedict.FusionDictionary;
import com.android.inputmethod.latin.makedict.FusionDictionary.PtNodeArray;
import com.android.inputmethod.latin.makedict.FusionDictionary.WeightedString;
import com.android.inputmethod.latin.makedict.UnsupportedFormatException;
import com.android.inputmethod.latin.utils.CollectionUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
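// Usage sketch (illustrative; callers outside this class are assumptions):
//   writer.addUnigramWord("hello", null /* no shortcut */, 100, 0, false);
//   writer.addBigramWords("hello", "world", 50, true, 0);
// The accumulated FusionDictionary is later serialized when the owning
// AbstractDictionaryWriter invokes writeDictionary() with a DictEncoder.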
/**
* An in memory dictionary for memorizing entries and writing a binary dictionary.<|fim▁hole|>public class DictionaryWriter extends AbstractDictionaryWriter {
private static final int BINARY_DICT_VERSION = 3;
private static final FormatSpec.FormatOptions FORMAT_OPTIONS =
new FormatSpec.FormatOptions(BINARY_DICT_VERSION, true /* supportsDynamicUpdate */);
private FusionDictionary mFusionDictionary;
public DictionaryWriter(final Context context, final String dictType) {
super(context, dictType);
clear();
}
@Override
public void clear() {
final HashMap<String, String> attributes = CollectionUtils.newHashMap();
mFusionDictionary = new FusionDictionary(new PtNodeArray(),
new FusionDictionary.DictionaryOptions(attributes, false, false));
}
/**
* Adds a word unigram to the fusion dictionary.
*/
// TODO: Create "cache dictionary" to cache fresh words for frequently updated dictionaries,
// considering performance regression.
@Override
public void addUnigramWord(final String word, final String shortcutTarget, final int frequency,
final int shortcutFreq, final boolean isNotAWord) {
if (shortcutTarget == null) {
mFusionDictionary.add(word, frequency, null, isNotAWord);
} else {
// TODO: Do this in the subclass, with this class taking an arraylist.
final ArrayList<WeightedString> shortcutTargets = CollectionUtils.newArrayList();
shortcutTargets.add(new WeightedString(shortcutTarget, shortcutFreq));
mFusionDictionary.add(word, frequency, shortcutTargets, isNotAWord);
}
}
@Override
public void addBigramWords(final String word0, final String word1, final int frequency,
final boolean isValid, final long lastModifiedTime) {
mFusionDictionary.setBigram(word0, word1, frequency);
}
@Override
public void removeBigramWords(final String word0, final String word1) {
// This class don't support removing bigram words.
}
@Override
protected void writeDictionary(final DictEncoder dictEncoder,
final Map<String, String> attributeMap) throws IOException, UnsupportedFormatException {
for (final Map.Entry<String, String> entry : attributeMap.entrySet()) {
mFusionDictionary.addOptionAttribute(entry.getKey(), entry.getValue());
}
dictEncoder.writeDictionary(mFusionDictionary, FORMAT_OPTIONS);
}
@Override
public ArrayList<SuggestedWordInfo> getSuggestions(final WordComposer composer,
final String prevWord, final ProximityInfo proximityInfo,
boolean blockOffensiveWords, final int[] additionalFeaturesOptions) {
// This class doesn't support suggestion.
return null;
}
@Override
public boolean isValidWord(String word) {
// This class doesn't support dictionary retrieval.
return false;
}
}<|fim▁end|> | */ |
<|file_name|>UsernameFinder.java<|end_file_name|><|fim▁begin|>/*
* Licensed to Apereo under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Apereo licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at the following location:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jasig.portlet.notice.util;
import javax.portlet.PortletRequest;
import javax.servlet.http.HttpServletRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.stereotype.Component;
@Component("usernameFinder")
public final class UsernameFinder {
@Value("${UsernameFinder.unauthenticatedUsername}")
private String unauthenticatedUsername = "guest";
private Logger logger = LoggerFactory.getLogger(getClass());
/**
* @deprecated Prefer interactions that are not based on the Portlet API
*/
@Deprecated
public String findUsername(PortletRequest req) {
return req.getRemoteUser() != null
? req.getRemoteUser()
: unauthenticatedUsername;
}
/**
* @since 4.0
*/
public String findUsername(HttpServletRequest request) {
<|fim▁hole|> final Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
logger.trace("Processing the following Authentication object: {}", authentication);
final String rslt = (String) authentication.getPrincipal();
logger.debug("Found username '{}' based on the contents of the SecurityContextHolder", rslt);
// Identification based on Spring Security is required to access Servlet-based APIs
if (rslt == null) {
throw new SecurityException("User not identified");
}
return rslt;
}
/**
* @deprecated Prefer interactions that are not based on the Portlet API
*/
@Deprecated
public boolean isAuthenticated(PortletRequest req) {
return !findUsername(req).equalsIgnoreCase(unauthenticatedUsername);
}
public boolean isAuthenticated(HttpServletRequest request) {
final Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
logger.trace("Processing the following Authentication object: {}", authentication);
return authentication != null && authentication.isAuthenticated();
}
}<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
from setuptools import setup, find_packages
version = '0.2.4'
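# Illustrative install/run (not part of this file):
#   pip install .   # registers the 'lolbuddy' console script via entry_points
#   lolbuddy        # runs lolbuddy:main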
setup(
name='lolbuddy',
version=version,
description='a cli tool to update league of legends itemsets and ability order from champion.gg',
author='Cyrus Roshan',
author_email='[email protected]',
license='MIT',
keywords=['lol', 'league', 'league of legends', 'item', 'ability'],
url='https://github.com/CyrusRoshan/lolbuddy',
packages=find_packages(),
package_data={},
install_requires=[
'requests-futures >= 0.9.5',
],
entry_points={<|fim▁hole|>)<|fim▁end|> | 'console_scripts': [
'lolbuddy=lolbuddy:main',
],
}, |
<|file_name|>node.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.baremetal.v1 import _common
from openstack import exceptions
from openstack import resource
from openstack import utils
class ValidationResult(object):
"""Result of a single interface validation.
:ivar result: Result of a validation, ``True`` for success, ``False`` for
failure, ``None`` for unsupported interface.
:ivar reason: If ``result`` is ``False`` or ``None``, explanation of
the result.
"""
def __init__(self, result, reason):
self.result = result
self.reason = reason
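# Illustrative use of ValidationResult (sketch, not part of the public API):
#   res = ValidationResult(False, 'driver_info is missing required fields')
#   if res.result is False:
#       print(res.reason)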
class Node(_common.ListMixin, resource.Resource):
resources_key = 'nodes'
base_path = '/nodes'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
allow_patch = True
commit_method = 'PATCH'
commit_jsonpatch = True
_query_mapping = resource.QueryParameters(
'associated', 'conductor_group', 'driver', 'fault',
'provision_state', 'resource_class',
fields={'type': _common.fields_type},
instance_id='instance_uuid',
is_maintenance='maintenance',
)
# The allocation_uuid field introduced in 1.52 (Stein).
_max_microversion = '1.52'
# Properties
#: The UUID of the allocation associated with this node. Added in API
#: microversion 1.52.
allocation_id = resource.Body("allocation_uuid")
#: A string or UUID of the tenant who owns the baremetal node. Added in API
#: microversion 1.50.
owner = resource.Body("owner")
    #: The UUID of the chassis associated with this node. Can be empty or None.
chassis_id = resource.Body("chassis_uuid")
#: The current clean step.
clean_step = resource.Body("clean_step")
    #: Hostname of the conductor currently handling this node. Added in API
# microversion 1.49.
conductor = resource.Body("conductor")
#: Conductor group this node is managed by. Added in API microversion 1.46.
conductor_group = resource.Body("conductor_group")
#: Timestamp at which the node was last updated.
created_at = resource.Body("created_at")
#: The current deploy step. Added in API microversion 1.44.
deploy_step = resource.Body("deploy_step")
#: The name of the driver.
driver = resource.Body("driver")
#: All the metadata required by the driver to manage this node. List of
#: fields varies between drivers, and can be retrieved from the
#: :class:`openstack.baremetal.v1.driver.Driver` resource.
driver_info = resource.Body("driver_info", type=dict)
#: Internal metadata set and stored by node's driver. This is read-only.
driver_internal_info = resource.Body("driver_internal_info", type=dict)
#: A set of one or more arbitrary metadata key and value pairs.
extra = resource.Body("extra")
#: Fault type that caused the node to enter maintenance mode.
#: Introduced in API microversion 1.42.
fault = resource.Body("fault")
#: The UUID of the node resource.
id = resource.Body("uuid", alternate_id=True)
#: Information used to customize the deployed image, e.g. size of root
#: partition, config drive in the form of base64 encoded string and other
#: metadata.
instance_info = resource.Body("instance_info")
#: UUID of the nova instance associated with this node.
instance_id = resource.Body("instance_uuid")
#: Override enabling of automated cleaning. Added in API microversion 1.47.
is_automated_clean_enabled = resource.Body("automated_clean", type=bool)
#: Whether console access is enabled on this node.
is_console_enabled = resource.Body("console_enabled", type=bool)
#: Whether node is currently in "maintenance mode". Nodes put into
#: maintenance mode are removed from the available resource pool.
is_maintenance = resource.Body("maintenance", type=bool)
# Whether the node is protected from undeploying. Added in API microversion
# 1.48.
is_protected = resource.Body("protected", type=bool)
#: Any error from the most recent transaction that started but failed to
#: finish.
last_error = resource.Body("last_error")
#: A list of relative links, including self and bookmark links.
links = resource.Body("links", type=list)
#: user settable description of the reason why the node was placed into
#: maintenance mode.
maintenance_reason = resource.Body("maintenance_reason")
#: Human readable identifier for the node. May be undefined. Certain words
#: are reserved. Added in API microversion 1.5
name = resource.Body("name")
#: Links to the collection of ports on this node.
ports = resource.Body("ports", type=list)
#: Links to the collection of portgroups on this node. Available since
#: API microversion 1.24.
port_groups = resource.Body("portgroups", type=list)
#: The current power state. Usually "power on" or "power off", but may be
#: "None" if service is unable to determine the power state.
power_state = resource.Body("power_state")
#: Physical characteristics of the node. Content populated by the service
#: during inspection.
properties = resource.Body("properties", type=dict)
# The reason why this node is protected. Added in API microversion 1.48.
protected_reason = resource.Body("protected_reason")
#: The current provisioning state of the node.
provision_state = resource.Body("provision_state")
#: The current RAID configuration of the node.
raid_config = resource.Body("raid_config")
    #: The name of a service conductor host which is holding a lock on this
#: node, if a lock is held.
reservation = resource.Body("reservation")
#: A string to be used by external schedulers to identify this node as a
#: unit of a specific type of resource. Added in API microversion 1.21.
resource_class = resource.Body("resource_class")
#: Links to the collection of states.
states = resource.Body("states", type=list)
#: The requested state if a provisioning action has been requested. For
#: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``,
#: ``ACTIVE`` etc.
target_provision_state = resource.Body("target_provision_state")
#: The requested state during a state transition.
target_power_state = resource.Body("target_power_state")
#: The requested RAID configuration of the node which will be applied when
#: the node next transitions through the CLEANING state.
target_raid_config = resource.Body("target_raid_config")
#: Traits of the node. Introduced in API microversion 1.37.
traits = resource.Body("traits", type=list)
#: Timestamp at which the node was last updated.
updated_at = resource.Body("updated_at")
# Hardware interfaces grouped together for convenience.
#: BIOS interface to use when setting BIOS properties of the node.
#: Introduced in API microversion 1.40.
bios_interface = resource.Body("bios_interface")
#: Boot interface to use when configuring boot of the node.
#: Introduced in API microversion 1.31.
boot_interface = resource.Body("boot_interface")
#: Console interface to use when working with serial console.
#: Introduced in API microversion 1.31.
console_interface = resource.Body("console_interface")
#: Deploy interface to use when deploying the node.
#: Introduced in API microversion 1.31.
deploy_interface = resource.Body("deploy_interface")
#: Inspect interface to use when inspecting the node.
#: Introduced in API microversion 1.31.
inspect_interface = resource.Body("inspect_interface")
#: Management interface to use for management actions on the node.
#: Introduced in API microversion 1.31.
management_interface = resource.Body("management_interface")
#: Network interface provider to use when plumbing the network connections
#: for this node. Introduced in API microversion 1.20.
network_interface = resource.Body("network_interface")
#: Power interface to use for power actions on the node.
#: Introduced in API microversion 1.31.
power_interface = resource.Body("power_interface")
#: RAID interface to use for configuring RAID on the node.
#: Introduced in API microversion 1.31.
raid_interface = resource.Body("raid_interface")
#: Rescue interface to use for rescuing of the node.
#: Introduced in API microversion 1.38.
rescue_interface = resource.Body("rescue_interface")
#: Storage interface to use when attaching remote storage.
#: Introduced in API microversion 1.33.
storage_interface = resource.Body("storage_interface")
#: Vendor interface to use for vendor-specific actions on the node.
#: Introduced in API microversion 1.31.
vendor_interface = resource.Body("vendor_interface")
def _consume_body_attrs(self, attrs):
if 'provision_state' in attrs and attrs['provision_state'] is None:
# API version 1.1 uses None instead of "available". Make it
# consistent.
attrs['provision_state'] = 'available'
return super(Node, self)._consume_body_attrs(attrs)
def create(self, session, *args, **kwargs):
"""Create a remote resource based on this instance.
The overridden version is capable of handling the populated
``provision_state`` field of one of three values: ``enroll``,
``manageable`` or ``available``. The default is currently
``available``, since it's the only state supported by all API versions.
Note that Bare Metal API 1.4 is required for ``manageable`` and
1.11 is required for ``enroll``.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: This :class:`Resource` instance.
:raises: ValueError if the Node's ``provision_state`` is not one of
``None``, ``enroll``, ``manageable`` or ``available``.
:raises: :exc:`~openstack.exceptions.NotSupported` if
the ``provision_state`` cannot be reached with any API version
supported by the server.
"""
expected_provision_state = self.provision_state
if expected_provision_state is None:
expected_provision_state = 'available'
if expected_provision_state not in ('enroll',
'manageable',
'available'):
raise ValueError(
"Node's provision_state must be one of 'enroll', "
"'manageable' or 'available' for creation, got %s" %
expected_provision_state)
session = self._get_session(session)
# Verify that the requested provision state is reachable with the API
# version we are going to use.<|fim▁hole|> expected_version = _common.STATE_VERSIONS[expected_provision_state]
except KeyError:
pass
else:
self._assert_microversion_for(
session, 'create', expected_version,
error_message="Cannot create a node with initial provision "
"state %s" % expected_provision_state)
# Ironic cannot set provision_state itself, so marking it as unchanged
self._clean_body_attrs({'provision_state'})
super(Node, self).create(session, *args, **kwargs)
if (self.provision_state == 'enroll'
and expected_provision_state != 'enroll'):
self.set_provision_state(session, 'manage', wait=True)
if (self.provision_state == 'manageable'
and expected_provision_state == 'available'):
self.set_provision_state(session, 'provide', wait=True)
if (self.provision_state == 'available'
and expected_provision_state == 'manageable'):
self.set_provision_state(session, 'manage', wait=True)
return self
def commit(self, session, *args, **kwargs):
"""Commit the state of the instance to the remote resource.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: This :class:`Node` instance.
"""
# These fields have to be set through separate API.
if ('maintenance_reason' in self._body.dirty
or 'maintenance' in self._body.dirty):
if not self.is_maintenance and self.maintenance_reason:
if 'maintenance' in self._body.dirty:
self.maintenance_reason = None
else:
raise ValueError('Maintenance reason cannot be set when '
'maintenance is False')
if self.is_maintenance:
self._do_maintenance_action(
session, 'put', {'reason': self.maintenance_reason})
else:
# This corresponds to setting maintenance=False and
# maintenance_reason=None in the same request.
self._do_maintenance_action(session, 'delete')
self._clean_body_attrs({'maintenance', 'maintenance_reason'})
if not self.requires_commit:
# Other fields are not updated, re-fetch the node to reflect
# the new status.
return self.fetch(session)
return super(Node, self).commit(session, *args, **kwargs)
def set_provision_state(self, session, target, config_drive=None,
clean_steps=None, rescue_password=None,
wait=False, timeout=None):
"""Run an action modifying this node's provision state.
This call is asynchronous, it will return success as soon as the Bare
Metal service acknowledges the request.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param target: Provisioning action, e.g. ``active``, ``provide``.
See the Bare Metal service documentation for available actions.
:param config_drive: Config drive to pass to the node, only valid
for ``active` and ``rebuild`` targets. You can use functions from
:mod:`openstack.baremetal.configdrive` to build it.
:param clean_steps: Clean steps to execute, only valid for ``clean``
target.
:param rescue_password: Password for the rescue operation, only valid
for ``rescue`` target.
:param wait: Whether to wait for the target state to be reached.
:param timeout: Timeout (in seconds) to wait for the target state to be
reached. If ``None``, wait without timeout.
:return: This :class:`Node` instance.
:raises: ValueError if ``config_drive``, ``clean_steps`` or
``rescue_password`` are provided with an invalid ``target``.
:raises: :class:`~openstack.exceptions.ResourceFailure` if the node
reaches an error state while waiting for the state.
:raises: :class:`~openstack.exceptions.ResourceTimeout` if timeout
is reached while waiting for the state.
"""
session = self._get_session(session)
version = None
if target in _common.PROVISIONING_VERSIONS:
version = '1.%d' % _common.PROVISIONING_VERSIONS[target]
if config_drive:
# Some config drive actions require a higher version.
if isinstance(config_drive, dict):
version = '1.56'
elif target == 'rebuild':
version = '1.35'
version = utils.pick_microversion(session, version)
body = {'target': target}
if config_drive:
if target not in ('active', 'rebuild'):
raise ValueError('Config drive can only be provided with '
'"active" and "rebuild" targets')
# Not a typo - ironic accepts "configdrive" (without underscore)
body['configdrive'] = config_drive
if clean_steps is not None:
if target != 'clean':
raise ValueError('Clean steps can only be provided with '
'"clean" target')
body['clean_steps'] = clean_steps
if rescue_password is not None:
if target != 'rescue':
raise ValueError('Rescue password can only be provided with '
'"rescue" target')
body['rescue_password'] = rescue_password
if wait:
try:
expected_state = _common.EXPECTED_STATES[target]
except KeyError:
raise ValueError('For target %s the expected state is not '
'known, cannot wait for it' % target)
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'states', 'provision')
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set provision state for bare metal node {node} "
"to {target}".format(node=self.id, target=target))
exceptions.raise_from_response(response, error_message=msg)
if wait:
return self.wait_for_provision_state(session,
expected_state,
timeout=timeout)
else:
return self.fetch(session)
def wait_for_provision_state(self, session, expected_state, timeout=None,
abort_on_failed_state=True):
"""Wait for the node to reach the expected state.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param expected_state: The expected provisioning state to reach.
:param timeout: If ``wait`` is set to ``True``, specifies how much (in
seconds) to wait for the expected state to be reached. The value of
``None`` (the default) means no client-side timeout.
:param abort_on_failed_state: If ``True`` (the default), abort waiting
if the node reaches a failure state which does not match the
expected one. Note that the failure state for ``enroll`` ->
``manageable`` transition is ``enroll`` again.
:return: This :class:`Node` instance.
:raises: :class:`~openstack.exceptions.ResourceFailure` if the node
reaches an error state and ``abort_on_failed_state`` is ``True``.
:raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
"""
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for node %(node)s to reach "
"target state '%(state)s'" % {'node': self.id,
'state': expected_state}):
self.fetch(session)
if self._check_state_reached(session, expected_state,
abort_on_failed_state):
return self
session.log.debug(
'Still waiting for node %(node)s to reach state '
'"%(target)s", the current state is "%(state)s"',
{'node': self.id, 'target': expected_state,
'state': self.provision_state})
def wait_for_reservation(self, session, timeout=None):
"""Wait for a lock on the node to be released.
Bare metal nodes in ironic have a reservation lock that
is used to represent that a conductor has locked the node
while performing some sort of action, such as changing
configuration as a result of a machine state change.
        This lock can occur during power synchronization, and prevents
updates to objects attached to the node, such as ports.
Note that nothing prevents a conductor from acquiring the lock again
after this call returns, so it should be treated as best effort.
Returns immediately if there is no reservation on the node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param timeout: How much (in seconds) to wait for the lock to be
released. The value of ``None`` (the default) means no timeout.
:return: This :class:`Node` instance.
"""
if self.reservation is None:
return self
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the lock to be released on node %s" %
self.id):
self.fetch(session)
if self.reservation is None:
return self
session.log.debug(
'Still waiting for the lock to be released on node '
'%(node)s, currently locked by conductor %(host)s',
{'node': self.id, 'host': self.reservation})
def _check_state_reached(self, session, expected_state,
abort_on_failed_state=True):
"""Wait for the node to reach the expected state.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param expected_state: The expected provisioning state to reach.
:param abort_on_failed_state: If ``True`` (the default), abort waiting
if the node reaches a failure state which does not match the
expected one. Note that the failure state for ``enroll`` ->
``manageable`` transition is ``enroll`` again.
:return: ``True`` if the target state is reached
:raises: :class:`~openstack.exceptions.ResourceFailure` if the node
reaches an error state and ``abort_on_failed_state`` is ``True``.
"""
# NOTE(dtantsur): microversion 1.2 changed None to available
if (self.provision_state == expected_state
or (expected_state == 'available'
and self.provision_state is None)):
return True
elif not abort_on_failed_state:
return False
if (self.provision_state.endswith(' failed') or
self.provision_state == 'error'):
raise exceptions.ResourceFailure(
"Node %(node)s reached failure state \"%(state)s\"; "
"the last error is %(error)s" %
{'node': self.id, 'state': self.provision_state,
'error': self.last_error})
# Special case: a failure state for "manage" transition can be
# "enroll"
elif (expected_state == 'manageable'
and self.provision_state == 'enroll' and self.last_error):
raise exceptions.ResourceFailure(
"Node %(node)s could not reach state manageable: "
"failed to verify management credentials; "
"the last error is %(error)s" %
{'node': self.id, 'error': self.last_error})
# TODO(dtantsur): waiting for power state
def set_power_state(self, session, target):
"""Run an action modifying this node's power state.
This call is asynchronous, it will return success as soon as the Bare
Metal service acknowledges the request.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param target: Target power state, e.g. "rebooting", "power on".
See the Bare Metal service documentation for available actions.
"""
session = self._get_session(session)
if target.startswith("soft "):
version = '1.27'
else:
version = None
version = utils.pick_microversion(session, version)
# TODO(dtantsur): server timeout support
body = {'target': target}
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'states', 'power')
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set power state for bare metal node {node} "
"to {target}".format(node=self.id, target=target))
exceptions.raise_from_response(response, error_message=msg)
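        # A minimal usage sketch (target values are examples from the Bare
        # Metal API; the "soft " variants require microversion 1.27, which
        # the code above selects automatically):
        #
        #     node.set_power_state(session, 'power off')
        #     node.set_power_state(session, 'soft rebooting')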
def attach_vif(self, session, vif_id, retry_on_conflict=True):
"""Attach a VIF to the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID. A VIF can only be attached to one node
at a time.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param string vif_id: Backend-specific VIF ID.
:param retry_on_conflict: Whether to retry HTTP CONFLICT errors.
This can happen when either the VIF is already used on a node or
the node is locked. Since the latter happens more often, the
default value is True.
:return: ``None``
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'commit', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs')
body = {'id': vif_id}
retriable_status_codes = _common.RETRIABLE_STATUS_CODES
if not retry_on_conflict:
retriable_status_codes = set(retriable_status_codes) - {409}
response = session.post(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=retriable_status_codes)
msg = ("Failed to attach VIF {vif} to bare metal node {node}"
.format(node=self.id, vif=vif_id))
exceptions.raise_from_response(response, error_message=msg)
def detach_vif(self, session, vif_id, ignore_missing=True):
"""Detach a VIF from the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param string vif_id: Backend-specific VIF ID.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the VIF does not exist. Otherwise, ``False``
is returned.
:return: ``True`` if the VIF was detached, otherwise ``False``.
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'commit', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs', vif_id)
response = session.delete(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
if ignore_missing and response.status_code == 400:
session.log.debug(
'VIF %(vif)s was already removed from node %(node)s',
{'vif': vif_id, 'node': self.id})
return False
msg = ("Failed to detach VIF {vif} from bare metal node {node}"
.format(node=self.id, vif=vif_id))
exceptions.raise_from_response(response, error_message=msg)
return True
def list_vifs(self, session):
"""List IDs of VIFs attached to the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: List of VIF IDs as strings.
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'fetch', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs')
response = session.get(
request.url, headers=request.headers, microversion=version)
msg = ("Failed to list VIFs attached to bare metal node {node}"
.format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
return [vif['id'] for vif in response.json()['vifs']]
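        # Sketch of the VIF helpers defined above working together; the port
        # UUID is a made-up example value:
        #
        #     node.attach_vif(session, '1de0261c-aaaa-4c4e-9090-46f2b0e91b78')
        #     assert '1de0261c-aaaa-4c4e-9090-46f2b0e91b78' in node.list_vifs(session)
        #     node.detach_vif(session, '1de0261c-aaaa-4c4e-9090-46f2b0e91b78')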
def validate(self, session, required=('boot', 'deploy', 'power')):
"""Validate required information on a node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param required: List of interfaces that are required to pass
validation. The default value is the list of minimum required
interfaces for provisioning.
:return: dict mapping interface names to :class:`ValidationResult`
objects.
:raises: :exc:`~openstack.exceptions.ValidationException` if validation
fails for a required interface.
"""
session = self._get_session(session)
version = self._get_microversion_for(session, 'fetch')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'validate')
response = session.get(request.url, headers=request.headers,
microversion=version)
msg = ("Failed to validate node {node}".format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
result = response.json()
if required:
failed = [
'%s (%s)' % (key, value.get('reason', 'no reason'))
for key, value in result.items()
if key in required and not value.get('result')
]
if failed:
raise exceptions.ValidationException(
'Validation failed for required interfaces of node {node}:'
' {failures}'.format(node=self.id,
failures=', '.join(failed)))
return {key: ValidationResult(value.get('result'), value.get('reason'))
for key, value in result.items()}
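        # Example of interpreting the mapping returned above (a sketch;
        # ValidationResult is assumed to expose .result and .reason as it is
        # constructed in this module):
        #
        #     results = node.validate(session, required=('power',))
        #     if not results['power'].result:
        #         print('power interface invalid:', results['power'].reason)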
def set_maintenance(self, session, reason=None):
"""Enable maintenance mode on the node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param reason: Optional reason for maintenance.
:return: This :class:`Node` instance.
"""
self._do_maintenance_action(session, 'put', {'reason': reason})
return self.fetch(session)
def unset_maintenance(self, session):
"""Disable maintenance mode on the node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: This :class:`Node` instance.
"""
self._do_maintenance_action(session, 'delete')
return self.fetch(session)
def _do_maintenance_action(self, session, verb, body=None):
session = self._get_session(session)
version = self._get_microversion_for(session, 'commit')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'maintenance')
response = getattr(session, verb)(
request.url, json=body,
headers=request.headers, microversion=version)
msg = ("Failed to change maintenance mode for node {node}"
.format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
def set_boot_device(self, session, boot_device, persistent=False):
"""Set node boot device
:param session: The session to use for making this request.
:param boot_device: Boot device to assign to the node.
:param persistent: If the boot device change is maintained after node
reboot
:return: The updated :class:`~openstack.baremetal.v1.node.Node`
"""
session = self._get_session(session)
version = self._get_microversion_for(session, 'commit')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'management', 'boot_device')
body = {'boot_device': boot_device, 'persistent': persistent}
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set boot device for node {node}"
.format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
def add_trait(self, session, trait):
"""Add a trait to a node.
:param session: The session to use for making this request.
:param trait: The trait to add to the node.
:returns: The updated :class:`~openstack.baremetal.v1.node.Node`
"""
session = self._get_session(session)
version = utils.pick_microversion(session, '1.37')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'traits', trait)
response = session.put(
request.url, json=None,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to add trait {trait} for node {node}"
.format(trait=trait, node=self.id))
exceptions.raise_from_response(response, error_message=msg)
self.traits = list(set(self.traits or ()) | {trait})
def remove_trait(self, session, trait, ignore_missing=True):
"""Remove a trait from a node.
:param session: The session to use for making this request.
:param trait: The trait to remove from the node.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the trait does not exist.
Otherwise, ``False`` is returned.
:returns: The updated :class:`~openstack.baremetal.v1.node.Node`
"""
session = self._get_session(session)
version = utils.pick_microversion(session, '1.37')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'traits', trait)
response = session.delete(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
        if ignore_missing and response.status_code == 400:
session.log.debug(
'Trait %(trait)s was already removed from node %(node)s',
{'trait': trait, 'node': self.id})
return False
msg = ("Failed to remove trait {trait} from bare metal node {node}"
.format(node=self.id, trait=trait))
exceptions.raise_from_response(response, error_message=msg)
self.traits = list(set(self.traits) - {trait})
return True
def set_traits(self, session, traits):
"""Set traits for a node.
Removes any existing traits and adds the traits passed in to this
method.
:param session: The session to use for making this request.
:param traits: list of traits to add to the node.
:returns: The updated :class:`~openstack.baremetal.v1.node.Node`
"""
session = self._get_session(session)
version = utils.pick_microversion(session, '1.37')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'traits')
body = {'traits': traits}
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set traits for node {node}"
.format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
self.traits = traits
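        # Illustrative sequence for the trait helpers above (trait names are
        # examples only):
        #
        #     node.set_traits(session, ['CUSTOM_GPU', 'HW_CPU_X86_VMX'])
        #     node.add_trait(session, 'CUSTOM_RAID')
        #     node.remove_trait(session, 'CUSTOM_GPU')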
NodeDetail = Node<|fim▁end|> | try: |
<|file_name|>Fantasma.java<|end_file_name|><|fim▁begin|>package jrpsoft;
import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.Point;
import java.awt.geom.Ellipse2D;
import java.util.Random;
public class Fantasma extends Actor {
protected static final int FANTASMA_SPEED = 1;
public boolean up, down, right, left;
static Boolean[] dir = new Boolean[4];
int avazarA = 0;
Random random;
public Fantasma(Point puntoIncio, Color colorPrincipal) {
super(puntoIncio, colorPrincipal);
random = new Random();
}
public Point getPoint() {
return p;
}
public void paint(Graphics2D g2) {
g2.setColor(color);
Point pixelPoint = Director.getPxOfCell(p);
Ellipse2D fantasma = new Ellipse2D.Float(pixelPoint.x, pixelPoint.y,
diametro, diametro);
g2.fill(fantasma);
g2.fill(new Ellipse2D.Float(pixelPoint.x - 1, pixelPoint.y + 12, diametro / 2, diametro / 2));
g2.fill(new Ellipse2D.Float(pixelPoint.x + 5, pixelPoint.y + 12, diametro / 2, diametro / 2));
g2.fill(new Ellipse2D.Float(pixelPoint.x + 11, pixelPoint.y + 12, diametro / 2, diametro / 2));
}
public void mover(Pacman pacman, Tablero tablero) {
/*
* System.out.println("ee "+(random.nextInt(5)));
* if(random.nextInt(5)==0){ avanzar((random.nextInt(4)+1),tablero); }
*/
// avazarA=movAleatorio(tablero);
//System.err.println(p);
// avazarA=0;
Astar.getAstar().getPath(p, pacman.p);
Point nextPoint=Astar.getAstar().getNextPoint();
avanzar(getDirToPoint(nextPoint), tablero);
}
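    // Sketch of how a game loop might drive the method above (the
    // surrounding loop and collections are illustrative, not part of this
    // class): on every tick each ghost re-plans a path toward the player
    // with A* and advances one cell.
    //
    //     for (Fantasma fantasma : fantasmas) {
    //         fantasma.mover(pacman, tablero);
    //     }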
/*@SuppressWarnings("unused")
private int movAleatorio(Tablero tablero) {
Point aux = (Point) p.clone();
int randDir = 0;
do {
aux = reverseTranslateDir(aux, randDir);
randDir = random.nextInt(4) + 1;
translateDir(aux, randDir);
// System.out.print("\nwhiling"+randDir+" px:"+aux.x+" py:"+aux.y);
} while (!tablero.isWalkable(aux));
return randDir;
}*/
private void avanzar(int dir, Tablero tablero) {
p=translateDir(p,dir);
/*Point anterior = (Point) p.clone();
translateDir(p, dir);
if (!tablero.isWalkable(p)) {
p = anterior;
}*/
}
public Point translateDir(Point p, int dir) {
switch (dir) {
case DUP:
p.y += UP;
break;
case DDOWN:
p.y += DOWN;<|fim▁hole|> case DLEFT:
p.x += LEFT;
break;
case DRIGHT:
p.x += RIGHT;
break;
default:
break;
}
return p;
}
/*
public Point reverseTranslateDir(Point p, int dir) {
switch (dir) {
case DUP:
p.y -= UP;
break;
case DDOWN:
p.y -= DOWN;
break;
case DLEFT:
p.x -= LEFT;
break;
case DRIGHT:
p.x -= RIGHT;
break;
default:
break;
}
return p;
}
*/
}<|fim▁end|> | break; |
<|file_name|>test_iam2_project_vpc_cascade_delete.py<|end_file_name|><|fim▁begin|>'''
@auther:fangxiao
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.affinitygroup_operations as ag_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.operations.deploy_operations as dep_ops
import zstackwoodpecker.operations.vpcdns_operations as vpcdns_ops
import apibinding.inventory as inventory
import zstackwoodpecker.operations.vpc_operations as vpc_ops
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_vxlan_network_uuid = None
project_uuid = None
project_operator_uuid = None
vni_range_uuid = None
vxlan_pool_uuid = None
l3_vpc_network_uuid = None
dns_text = '223.5.5.5'
allservices = ["VRouterRoute","DHCP","IPsec","LoadBalancer","CentralizedDNS","Eip","DNS","SNAT","VipQos","PortForwarding"]
cond = res_ops.gen_query_conditions("type","=","vrouter")
network_service_provider_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER,cond)[0].uuid
def create_l3_vpc(name,l2_uuid,session_uuid = None):
action = api_actions.CreateL3NetworkAction()
action.name = name
action.l2NetworkUuid = l2_uuid
action.timeout = 300000
action.type = inventory.VPC_L3_NETWORK_TYPE
action.sessionUuid = session_uuid
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('[l3:] %s is created' %name)
return evt.inventory
def AddDnsToL3Network(l3_network_uuid,dns_text,session_uuid = None):
action = api_actions.AddDnsToL3NetworkAction()
action.sessionUuid = session_uuid
action.dns = dns_text
action.l3NetworkUuid = l3_network_uuid
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('add dns to l3 network: %s' % l3_network_uuid)
return evt
def AttachNetworkServiceToL3Network(l3_network_uuid,allservices,session_uuid = None):
action = api_actions.AttachNetworkServiceToL3NetworkAction()
action.sessionUuid = session_uuid
action.l3NetworkUuid = l3_network_uuid
action.networkServices = {network_service_provider_uuid:allservices}
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('add network services to l3 network: %s' % l3_network_uuid)
return evt
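# A sketch of how the helpers above are meant to be combined inside a test
# (the uuids and session are placeholders for values created elsewhere in
# this module):
#
#     l3 = create_l3_vpc('test_vpc', l2_vxlan_network_uuid, project_login_uuid)
#     AddDnsToL3Network(l3.uuid, dns_text, session_uuid=project_login_uuid)
#     AttachNetworkServiceToL3Network(l3.uuid, allservices, session_uuid=project_login_uuid)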
def test():
global l2_vxlan_network_uuid,project_uuid,project_operator_uuid,vni_range_uuid,vxlan_pool_uuid,l3_vpc_network_uuid
# create vxlan pool and vni range
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
cluster_uuid = res_ops.get_resource(res_ops.CLUSTER)[0].uuid
vxlan_pool_name = 'vxlan_pool_name'
vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool(vxlan_pool_name,zone_uuid).uuid
vxlan_ops.create_vni_range('vni_range',20,40,vxlan_pool_uuid)
systemTags = ["l2NetworkUuid::%s::clusterUuid::%s::cidr::{172.20.0.1/16}"%(vxlan_pool_uuid,cluster_uuid)]
net_ops.attach_l2_vxlan_pool(vxlan_pool_uuid,cluster_uuid,systemTags)
# 1 create project
project_name = 'test_project7'
project = iam2_ops.create_iam2_project(project_name)
project_uuid = project.uuid
#cond = res_ops.gen_query_conditions("name",'=',"test_project7")
#linked_account_uuid = res_ops.query_resource(res_ops.ACCOUNT,cond)[0].uuid
linked_account_uuid = project.linkedAccountUuid
# 2 create project operator
project_operator_name = 'username7'
project_operator_password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
project_operator_uuid = iam2_ops.create_iam2_virtual_id(project_operator_name,project_operator_password,attributes=attributes).uuid
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
attributes = [{"name": "__ProjectRelatedZone__", "value": zone_uuid}]
iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes)
# 3 login in project by project operator
iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid],project_uuid)
project_operator_session_uuid = iam2_ops.login_iam2_virtual_id(project_operator_name,project_operator_password)
project_login_uuid = iam2_ops.login_iam2_project(project_name,session_uuid=project_operator_session_uuid).uuid
# 4 share vxlan pool to project
l2vxlan_pools = res_ops.query_resource(res_ops.L2_VXLAN_NETWORK_POOL)
for l2vxlan_pool in l2vxlan_pools:
acc_ops.share_resources([linked_account_uuid],[l2vxlan_pool.uuid])
# 5 create l2 vxlan
l2_vxlan_network_uuid = vxlan_ops.create_l2_vxlan_network('l2_vxlan',vxlan_pool_uuid,zone_uuid,session_uuid=project_login_uuid).uuid
# 6 use l2 vxlan to create l3 vpc
l3_vpc_network = create_l3_vpc('test_vpc',l2_vxlan_network_uuid,project_login_uuid)
l3_vpc_network_uuid = l3_vpc_network.uuid
# add ip range
ir_option = test_util.IpRangeOption()
ir_option.set_name('iprange2')
ir_option.set_description('iprange for vpc')
ir_option.set_netmask('255.255.255.0')
ir_option.set_gateway('192.168.23.1')
ir_option.set_l3_uuid(l3_vpc_network_uuid)
ir_option.set_startIp('192.168.23.2')
ir_option.set_endIp('192.168.23.254')
net_ops.add_ip_range(ir_option)
# add network service
AttachNetworkServiceToL3Network(l3_vpc_network_uuid,allservices,session_uuid = project_login_uuid)
# share the vr_offering to project and do create vpc router and vpc network
cond = res_ops.gen_query_conditions("name",'=',"virtual-router-vm")
vr_offering_uuid = res_ops.query_resource(res_ops.VR_OFFERING,cond)[0].uuid
acc_ops.share_resources([linked_account_uuid],[vr_offering_uuid])
vpc_ops.create_vpc_vrouter(name = 'test_vpc_vr', virtualrouter_offering_uuid = vr_offering_uuid,session_uuid = project_login_uuid)
vpc_vr = test_stub.query_vpc_vrouter('test_vpc_vr')
vpc_vr.add_nic(l3_vpc_network_uuid)
# 7 expunge the project and check the l2 vxlan
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
try:
l2_vxlan_network_test_uuid = res_ops.query_resource(res_ops.L2_VXLAN_NETWORK)[0].uuid
except:
test_util.test_pass(
"l2 vxlan is delete after deleted the project " )
    test_util.test_dsc('test l2 cascade delete')
# 8 check the vpc network and vpc_vr
try:
cond = res_ops.gen_query_conditions("name",'=',"test_vpc")
l3_vpc_network_uuid = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].uuid
except:
test_util.test_pass(
"l3_vpc is delete after deleted the project")
cond = res_ops.gen_query_conditions("name",'=',"test_vpc_vr")
vpc_vr = res_ops.query_resource(res_ops.VIRTUALROUTER_VM,cond)
    if vpc_vr and vpc_vr[0].state != 'Paused':
        test_util.test_fail(
            "vpc vr [%s] still exists after deleting and expunging the project [%s]" % (vpc_vr[0].uuid, project_uuid))
# 9 delete
vni_range_uuid = res_ops.get_resource(res_ops.VNI_RANGE)[0].uuid
vxlan_ops.delete_vni_range(vni_range_uuid)
net_ops.delete_l2(vxlan_pool_uuid)
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
def error_cleanup():
if project_uuid:
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
if project_operator_uuid:
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
if l2_vxlan_network_uuid:
net_ops.delete_l2(l2_vxlan_network_uuid)
if vni_range_uuid:
vxlan_ops.delete_vni_range(vni_range_uuid)
if vxlan_pool_uuid:
net_ops.delete_l2(vxlan_pool_uuid)<|fim▁hole|><|fim▁end|> | if l3_vpc_network_uuid:
net_ops.delete_l3(l3_vpc_network_uuid) |
<|file_name|>test_group.py<|end_file_name|><|fim▁begin|>"""
Test for the bandicoot.helper.group module.
"""
import bandicoot as bc
from bandicoot.core import Record, Position
import unittest
import datetime
from bandicoot.tests.generate_user import random_burst
from bandicoot.helper.group import group_records
from bandicoot.helper.tools import std, mean
from datetime import timedelta
import numpy as np
import os
class TestGroup(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._dir_changed = False
def setUp(self):
if not TestGroup._dir_changed:
abspath = os.path.abspath(__file__)
name = abspath.index(os.path.basename(__file__))
abspath = abspath[:name]
os.chdir(abspath)
TestGroup._dir_changed = True
self.maxDiff = None
self.user = bc.io.read_orange("u_test", "samples", describe=False)
self.random_int_list = np.random.randint(1, 1000, size=9001)
self.sum_stats_list = [bc.helper.tools.SummaryStats(np.random.rand(), np.random.rand(),
np.random.rand(), np.random.rand(), np.random.rand(), np.random.rand(), np.random.rand(), []) for _ in range(9001)]
def test_statistics(self):
self.assertDictEqual(bc.helper.group.statistics(self.random_int_list, summary='default'), {
'mean': mean(self.random_int_list),
'std': std(self.random_int_list),
})
def mean_std(key):
return {
'mean': mean([getattr(s, key) for s in self.sum_stats_list]),
'std': std([getattr(s, key) for s in self.sum_stats_list]),
}
self.assertDictEqual(bc.helper.group.statistics(self.sum_stats_list, summary='extended'), {
'kurtosis': mean_std('kurtosis'),
'mean': mean_std('mean'),
'median': mean_std('median'),
'skewness': mean_std('skewness'),
'std': mean_std('std'),
'min': mean_std('min'),
'max': mean_std('max')
})
self.assertEqual(bc.helper.group.statistics([]).values(), [None] * 2)<|fim▁hole|> def run_bad_aggregated():
try:
bc.helper.group.statistics("bad_aggregated")
except (TypeError, ValueError):
return True
return False
self.assertTrue(run_bad_aggregated())
def test_weekly_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 24), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekday_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 25), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 11), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekday')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_weekend_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 31), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_week='weekend')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0]], [records[1]], [records[2]]])
def test_daily_group(self):
records = [
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 22, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 8, 23, 10, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 9, 7, 11, 00), 1, Position()),
Record("test_itr", "in", "1", datetime.datetime(2014, 10, 18, 2, 00), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='night')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[3]]])
grouping = bc.helper.group.group_records(user, groupby='week', part_of_day='day')
groups = [[r for r in l] for l in grouping]
self.assertEqual(groups, [[records[0], records[1]], [records[2]]])
def test_none_group(self):
records = [
Record("call", "in", "1", datetime.datetime(2014, 9, 4), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 5), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 11), 1, Position()),
Record("call", "in", "1", datetime.datetime(2014, 9, 12), 1, Position())
]
user = bc.User()
user.records = records
grouping = bc.helper.group.group_records(user, groupby=None)
self.assertEqual(records, list(next(grouping)))
self.assertRaises(StopIteration, grouping.next)
class ConsistencyTests(unittest.TestCase):
def setUp(self):
self.user = bc.User()
self.user.records = random_burst(100, delta=timedelta(days=2))
def _group_set(self, method, interaction):
chunks = group_records(self.user, groupby=method,
interaction=interaction)
new_records = set(r for c in chunks for r in c)
return new_records
def test_weekly(self):
old_records = set(self.user.records)
new_records = self._group_set('week', None)
self.assertSetEqual(new_records, old_records)
new_records = self._group_set('week', 'call')
self.assertSetEqual(new_records, {r for r in old_records
if r.interaction == 'call'})
class MissingTests(unittest.TestCase):
def setUp(self):
self.user = bc.read_csv('user_ignored', 'samples')
def test_amount(self):
result = {
'all': 4,
'interaction': 2,
'direction': 2,
'correspondent_id': 0,
'datetime': 0,
'call_duration': 1,
}
self.assertDictEqual(self.user.ignored_records, result)
def test_total_records(self):
self.assertEqual(len(self.user.records), 1)<|fim▁end|> |
def test_statistics_bad_aggregated(self): |
<|file_name|>sr-latn.min.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1
oid sha256:a396601eca15b0c281513d01941cfd37300b927b32a1bb9bb6e708a9910f1b49<|fim▁hole|><|fim▁end|> | size 2712 |
<|file_name|>calibrated_image.py<|end_file_name|><|fim▁begin|>"""calibrated_image.py was written by Ryan Petersburg for use with fiber
characterization on the EXtreme PREcision Spectrograph
"""
import numpy as np
from .base_image import BaseImage
from .numpy_array_handler import filter_image, subframe_image
class CalibratedImage(BaseImage):
"""Fiber face image analysis class
Class that contains calibration images and executes corrections based on
those images
Attributes
----------
dark : str, array_like, or None
The input used to set the dark image. See
BaseImage.convert_image_to_array() for details
ambient : str, array_like, or None
The input used to set the ambient image. See
BaseImage.convert_image_to_array() for details
flat : str, array_like, or None
The input used to set the flat image. See
BaseImage.convert_image_to_array() for details
kernel_size : int (odd)
The kernel side length used when filtering the image. This value may
need to be tweaked, especially with few co-added images, due to random
noise. The filtered image is used for the centering algorithms, so for
a "true test" use kernel_size=1, but be careful, because this may
lead to needing a fairly high threshold for the noise.
new_calibration : bool
Whether or not self.calibration has been set with new images
Args
----
image_input : str, array_like, or None, optional
See BaseImage class for details
dark : str, array_like, or None, optional
Image input to instantiate BaseImage for dark image
ambient : str, array_like, or None, optional
Image input to instantiate BaseImage for ambient image
flat : str, array_like, or None, optional
Image input to instantiate BaseImage for flat image
kernel_size : int (odd), optional
Set the kernel size for filtering
**kwargs : keworded arguments
Passed into the BaseImage superclass
"""
def __init__(self, image_input, dark=None, ambient=None, flat=None,
kernel_size=9, **kwargs):
self.dark = dark
self.ambient = ambient
self.flat = flat
self.kernel_size = kernel_size
self.new_calibration = True
super(CalibratedImage, self).__init__(image_input, **kwargs)
#=========================================================================#
#==== Primary Image Getters ==============================================#
#=========================================================================#
def get_uncorrected_image(self):
"""Return the raw image without corrections or filtering.
Returns
-------
uncorrected_image : 2D numpy array
Raw image or average of images (depending on image_input)
"""
return self.convert_image_to_array(self.image_input)
def get_image(self):
"""Return the corrected image
This method must be called to get access to the corrected 2D numpy
array being analyzed. Attempts to access a previously saved image
under self.image_file or otherwise applies corrections to the raw
images pulled from their respective files
Returns
-------
image : 2D numpy array
Image corrected by calibration images
"""
if self.image_file is not None and not self.new_calibration:
return self.image_from_file(self.image_file)
return self.execute_error_corrections(self.get_uncorrected_image())
def get_uncorrected_filtered_image(self, kernel_size=None, **kwargs):
"""Return a median filtered image
Args
----
kernel_size : {None, int (odd)}, optional
The side length of the kernel used to median filter the image. Uses
self.kernel_size if None.
Returns
-------
filtered_image : 2D numpy array
The stored image median filtered with the given kernel_size
"""
image = self.get_uncorrected_image()
if image is None:
return None
if kernel_size is None:
kernel_size = self.kernel_size
return filter_image(image, kernel_size, **kwargs)
def get_filtered_image(self, kernel_size=None, **kwargs):
"""Return an error corrected and median filtered image
Returns
-------
filtered_image : 2D numpy array
The stored image median filtered with the given kernel_size and
error corrected using the given method
"""
image = self.get_image()
if image is None:
return None
if kernel_size is None:
kernel_size = self.kernel_size
return filter_image(image, kernel_size, **kwargs)
#=========================================================================#
#==== Calibration Image Getters ==========================================#
#=========================================================================#
def get_dark_image(self):
"""Returns the dark image.
Args
----
full_output : boolean, optional
            Passed to convert_image_to_array function
Returns
-------
dark_image : 2D numpy array
The dark image
output_obj : ImageInfo, optional
Object containing information about the image, if full_output=True
"""
return BaseImage(self.dark).get_image()
def get_ambient_image(self):
"""Returns the ambient image.
Args
----
full_output : boolean, optional
            Passed to convert_image_to_array function
Returns
-------
        ambient_image : 2D numpy array
            The ambient image, corrected by the dark image
output_obj : ImageInfo, optional
Object containing information about the image, if full_output=True
"""
return CalibratedImage(self.ambient, dark=self.dark).get_image()
def get_flat_image(self):
"""Returns the flat image.
Args
----
full_output : boolean, optional
            Passed to convert_image_to_array function
<|fim▁hole|> -------
        flat_image : 2D numpy array
            The flat image, corrected by the dark image
output_obj : ImageInfo, optional
Object containing information about the image, if full_output=True
"""
return CalibratedImage(self.flat, dark=self.dark).get_image()
def set_dark(self, dark):
"""Sets the dark calibration image."""
self.dark = dark
self.new_calibration = True
def set_ambient(self, ambient):
"""Sets the ambient calibration image."""
self.ambient = ambient
self.new_calibration = True
def set_flat(self, flat):
"""Sets the flat calibration images."""
self.flat = flat
self.new_calibration = True
#=========================================================================#
#==== Image Calibration Algorithm ========================================#
#=========================================================================#
def execute_error_corrections(self, image):
"""Applies corrective images to image
Applies dark image to the flat field and ambient images. Then applies
flat field and ambient image correction to the primary image
Args
----
image : 2D numpy array
Image to be corrected
Returns
-------
corrected_image : 2D numpy array
Corrected image
"""
if image is None:
return None
corrected_image = image
dark_image = self.get_dark_image()
if dark_image is not None and dark_image.shape != corrected_image.shape:
dark_image = subframe_image(dark_image, self.subframe_x,
self.subframe_y, self.width,
self.height)
corrected_image = self.remove_dark_image(corrected_image,
dark_image)
ambient_image = self.get_ambient_image()
if ambient_image is not None:
if ambient_image.shape != corrected_image.shape:
ambient_image = subframe_image(ambient_image, self.subframe_x,
self.subframe_y, self.width,
self.height)
ambient_exp_time = BaseImage(self.ambient).exp_time
if self.exp_time is not None and ambient_exp_time != self.exp_time:
corrected_image = self.remove_dark_image(corrected_image,
ambient_image
* self.exp_time
/ ambient_exp_time)
else:
corrected_image = self.remove_dark_image(corrected_image,
ambient_image)
flat_image = self.get_flat_image()
if flat_image is not None:
if flat_image.shape != corrected_image.shape:
flat_image = subframe_image(flat_image, self.subframe_x,
self.subframe_y, self.width,
self.height)
corrected_image *= flat_image.mean() / flat_image
self.new_calibration = False
return corrected_image
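        # In symbols, a sketch of what the branches above compute when every
        # calibration image is supplied (t_exp / t_amb are the exposure times
        # of the main and ambient images; the renormalisation offsets are
        # omitted):
        #
        #     corrected = (raw - dark - ambient * t_exp / t_amb) * mean(flat) / flat
        #
        # Each term is applied only if the corresponding calibration image
        # was given.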
def remove_dark_image(self, image, dark_image=None):
"""Uses dark image to correct image
Args
----
image : 2D numpy array
numpy array of the image
dark_image : 2D numpy array
dark image to be removed
Returns
-------
output_array : 2D numpy array
corrected image
"""
if dark_image is None:
dark_image = self.get_dark_image()
if dark_image is None:
dark_image = np.zeros_like(image)
output_image = image - dark_image
# Renormalize to the approximate smallest value (avoiding hot pixels)
output_image -= filter_image(output_image, 5).min()
# Prevent any dark/ambient image hot pixels from leaking through
output_image *= (output_image > -1000.0).astype('uint8')
return output_image
#=========================================================================#
#==== Attribute Setters ==================================================#
#=========================================================================#
def set_attributes_from_object(self, object_file):
super(CalibratedImage, self).set_attributes_from_object(object_file)
self.dark = self.change_path(self.dark)
self.ambient = self.change_path(self.ambient)
self.flat = self.change_path(self.flat)<|fim▁end|> | Returns
|
<|file_name|>interface.rs<|end_file_name|><|fim▁begin|>/**
* Websocket implementation taken from https://github.com/nbaksalyar/mio-websocket
*/
/// High-level WebSocket library interface
use std::net::SocketAddr;
use std::thread;
use std::sync::mpsc;
use std::time::Duration;
use mio::{Token, EventLoop, EventSet, PollOpt, Sender, NotifyError};
use mio::tcp::{TcpListener};
use crate::ws_essentials::StatusCode;
use crate::ws_lib::server::{SERVER_TOKEN, WebSocketServer};
#[derive(Clone)]
pub enum WebSocketEvent {
Connect,
Close(StatusCode),
TextMessage(String),
#[allow(dead_code)]
Ping(Box<[u8]>),
#[allow(dead_code)]
Pong(Box<[u8]>),
BinaryMessage(Vec<u8>),
}
pub enum WebSocketInternalMessage {
GetPeers(mpsc::Sender<Vec<Token>>),
SendMessage((Token, WebSocketEvent)),
Reregister(Token),
}
pub struct WebSocket {
events: mpsc::Receiver<(Token, WebSocketEvent)>,
event_loop_tx: Sender<WebSocketInternalMessage>,
}
impl WebSocket {
pub fn new(address: SocketAddr) -> WebSocket {
let (tx, rx) = mpsc::channel();
let mut event_loop = EventLoop::new().unwrap();
let event_loop_tx = event_loop.channel();
thread::spawn(move || {
let server_socket = TcpListener::bind(&address).unwrap();
let mut server = WebSocketServer::new(server_socket, tx);
event_loop.register(&server.socket,
SERVER_TOKEN,
EventSet::readable(),
PollOpt::edge()).unwrap();
event_loop.run(&mut server).unwrap();
});
WebSocket {
event_loop_tx,
events: rx,
}
}
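    // Sketch of the intended call pattern for this type (the bind address
    // and echo behaviour are illustrative, not part of this crate):
    //
    //     let mut ws = WebSocket::new("127.0.0.1:10000".parse().unwrap());
    //     loop {
    //         let (token, event) = ws.next();
    //         if let WebSocketEvent::TextMessage(text) = event {
    //             ws.send((token, WebSocketEvent::TextMessage(text)));
    //         }
    //     }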
pub fn next(&mut self) -> (Token, WebSocketEvent) {
self.events.recv().unwrap()
}
pub fn get_connected(&mut self) -> Result<Vec<Token>, mpsc::RecvError> {
let (tx, rx) = mpsc::channel();
self.send_internal(WebSocketInternalMessage::GetPeers(tx)).expect("Can't send data to socket");
rx.recv()
}
pub fn send(&mut self, msg: (Token, WebSocketEvent)) {
self.send_internal(WebSocketInternalMessage::SendMessage(msg)).expect("Can't send data to socket");
}
fn send_internal(&mut self, msg: WebSocketInternalMessage) -> Result<(), NotifyError<WebSocketInternalMessage>> {
let mut val = msg;
loop {
match self.event_loop_tx.send(val) {
Err(NotifyError::Full(ret)) => {
// The notify queue is full, retry after some time.
val = ret;
thread::sleep(Duration::from_millis(10));
}
result @ _ => return result,
}
}
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for readbacks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
<|fim▁hole|>
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()<|fim▁end|> | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readbacks.settings") |
<|file_name|>evidenceScore.py<|end_file_name|><|fim▁begin|>from contentbase.upgrader import upgrade_step
@upgrade_step('evidenceScore', '1', '2')
def evidenceScore_1_2(value, system):
# https://github.com/ClinGen/clincoded/issues/1507
# Add affiliation property and update schema version<|fim▁hole|><|fim▁end|> | return |
<|file_name|>eth.rs<|end_file_name|><|fim▁begin|>// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Eth rpc implementation.
use std::thread;
use std::time::{Instant, Duration};
use std::sync::Arc;
use rlp::{self, UntrustedRlp};
use time::get_time;
use bigint::prelude::U256;
use bigint::hash::{H64, H160, H256};
use util::Address;
use parking_lot::Mutex;
use ethash::SeedHashCompute;
use ethcore::account_provider::{AccountProvider, DappId};
use ethcore::block::IsBlock;
use ethcore::client::{MiningBlockChainClient, BlockId, TransactionId, UncleId};
use ethcore::ethereum::Ethash;
use ethcore::filter::Filter as EthcoreFilter;
use ethcore::header::{Header as BlockHeader, BlockNumber as EthBlockNumber};
use ethcore::log_entry::LogEntry;
use ethcore::miner::{MinerService, ExternalMinerService};
use ethcore::transaction::SignedTransaction;
use ethcore::snapshot::SnapshotService;
use ethsync::{SyncProvider};
use jsonrpc_core::{BoxFuture, Result};
use jsonrpc_core::futures::future;
use jsonrpc_macros::Trailing;
use v1::helpers::{errors, limit_logs, fake_sign};
use v1::helpers::dispatch::{FullDispatcher, default_gas_price};
use v1::helpers::block_import::is_major_importing;
use v1::helpers::accounts::unwrap_provider;
use v1::traits::Eth;
use v1::types::{
RichBlock, Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo,
Transaction, CallRequest, Index, Filter, Log, Receipt, Work,
H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256,
};
use v1::metadata::Metadata;
const EXTRA_INFO_PROOF: &'static str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed";
/// Eth RPC options
pub struct EthClientOptions {
/// Return nonce from transaction queue when pending block not available.
pub pending_nonce_from_queue: bool,
/// Returns receipt from pending blocks
pub allow_pending_receipt_query: bool,
/// Send additional block number when asking for work
pub send_block_number_in_get_work: bool,
}
impl EthClientOptions {
/// Creates new default `EthClientOptions` and allows alterations
/// by provided function.
pub fn with<F: Fn(&mut Self)>(fun: F) -> Self {
let mut options = Self::default();
fun(&mut options);
options
}
}
impl Default for EthClientOptions {
fn default() -> Self {
EthClientOptions {
pending_nonce_from_queue: false,
allow_pending_receipt_query: true,
send_block_number_in_get_work: true,
}
}
}
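// A small usage sketch for the builder-style helper above (the field chosen
// is just an example):
//
//     let options = EthClientOptions::with(|opts| {
//         opts.send_block_number_in_get_work = false;
//     });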
/// Eth rpc implementation.
pub struct EthClient<C, SN: ?Sized, S: ?Sized, M, EM> where
C: MiningBlockChainClient,
SN: SnapshotService,
S: SyncProvider,
M: MinerService,
EM: ExternalMinerService {
client: Arc<C>,
snapshot: Arc<SN>,
sync: Arc<S>,
accounts: Option<Arc<AccountProvider>>,
miner: Arc<M>,
external_miner: Arc<EM>,<|fim▁hole|> options: EthClientOptions,
eip86_transition: u64,
}
impl<C, SN: ?Sized, S: ?Sized, M, EM> EthClient<C, SN, S, M, EM> where
C: MiningBlockChainClient,
SN: SnapshotService,
S: SyncProvider,
M: MinerService,
EM: ExternalMinerService {
/// Creates new EthClient.
pub fn new(
client: &Arc<C>,
snapshot: &Arc<SN>,
sync: &Arc<S>,
accounts: &Option<Arc<AccountProvider>>,
miner: &Arc<M>,
em: &Arc<EM>,
options: EthClientOptions
) -> Self {
EthClient {
client: client.clone(),
snapshot: snapshot.clone(),
sync: sync.clone(),
miner: miner.clone(),
accounts: accounts.clone(),
external_miner: em.clone(),
seed_compute: Mutex::new(SeedHashCompute::new()),
options: options,
eip86_transition: client.eip86_transition(),
}
}
/// Attempt to get the `Arc<AccountProvider>`, errors if provider was not
/// set.
fn account_provider(&self) -> Result<Arc<AccountProvider>> {
unwrap_provider(&self.accounts)
}
fn block(&self, id: BlockId, include_txs: bool) -> Result<Option<RichBlock>> {
let client = &self.client;
match (client.block(id.clone()), client.block_total_difficulty(id)) {
(Some(block), Some(total_difficulty)) => {
let view = block.header_view();
Ok(Some(RichBlock {
inner: Block {
hash: Some(view.hash().into()),
size: Some(block.rlp().as_raw().len().into()),
parent_hash: view.parent_hash().into(),
uncles_hash: view.uncles_hash().into(),
author: view.author().into(),
miner: view.author().into(),
state_root: view.state_root().into(),
transactions_root: view.transactions_root().into(),
receipts_root: view.receipts_root().into(),
number: Some(view.number().into()),
gas_used: view.gas_used().into(),
gas_limit: view.gas_limit().into(),
logs_bloom: view.log_bloom().into(),
timestamp: view.timestamp().into(),
difficulty: view.difficulty().into(),
total_difficulty: Some(total_difficulty.into()),
seal_fields: view.seal().into_iter().map(Into::into).collect(),
uncles: block.uncle_hashes().into_iter().map(Into::into).collect(),
transactions: match include_txs {
true => BlockTransactions::Full(block.view().localized_transactions().into_iter().map(|t| Transaction::from_localized(t, self.eip86_transition)).collect()),
false => BlockTransactions::Hashes(block.transaction_hashes().into_iter().map(Into::into).collect()),
},
extra_data: Bytes::new(view.extra_data()),
},
extra_info: client.block_extra_info(id.clone()).expect(EXTRA_INFO_PROOF),
}))
},
_ => Ok(None)
}
}
fn transaction(&self, id: TransactionId) -> Result<Option<Transaction>> {
match self.client.transaction(id) {
Some(t) => Ok(Some(Transaction::from_localized(t, self.eip86_transition))),
None => Ok(None),
}
}
fn uncle(&self, id: UncleId) -> Result<Option<RichBlock>> {
let client = &self.client;
let uncle: BlockHeader = match client.uncle(id) {
Some(hdr) => hdr.decode(),
None => { return Ok(None); }
};
let parent_difficulty = match client.block_total_difficulty(BlockId::Hash(uncle.parent_hash().clone())) {
Some(difficulty) => difficulty,
None => { return Ok(None); }
};
let size = client.block(BlockId::Hash(uncle.hash()))
.map(|block| block.into_inner().len())
.map(U256::from)
.map(Into::into);
let block = RichBlock {
inner: Block {
hash: Some(uncle.hash().into()),
size: size,
parent_hash: uncle.parent_hash().clone().into(),
uncles_hash: uncle.uncles_hash().clone().into(),
author: uncle.author().clone().into(),
miner: uncle.author().clone().into(),
state_root: uncle.state_root().clone().into(),
transactions_root: uncle.transactions_root().clone().into(),
number: Some(uncle.number().into()),
gas_used: uncle.gas_used().clone().into(),
gas_limit: uncle.gas_limit().clone().into(),
logs_bloom: uncle.log_bloom().clone().into(),
timestamp: uncle.timestamp().into(),
difficulty: uncle.difficulty().clone().into(),
total_difficulty: Some((uncle.difficulty().clone() + parent_difficulty).into()),
receipts_root: uncle.receipts_root().clone().into(),
extra_data: uncle.extra_data().clone().into(),
seal_fields: uncle.seal().into_iter().cloned().map(Into::into).collect(),
uncles: vec![],
transactions: BlockTransactions::Hashes(vec![]),
},
extra_info: client.uncle_extra_info(id).expect(EXTRA_INFO_PROOF),
};
Ok(Some(block))
}
fn dapp_accounts(&self, dapp: DappId) -> Result<Vec<H160>> {
let store = self.account_provider()?;
store
.note_dapp_used(dapp.clone())
.and_then(|_| store.dapp_addresses(dapp))
.map_err(|e| errors::account("Could not fetch accounts.", e))
}
}
pub fn pending_logs<M>(miner: &M, best_block: EthBlockNumber, filter: &EthcoreFilter) -> Vec<Log> where M: MinerService {
let receipts = miner.pending_receipts(best_block);
let pending_logs = receipts.into_iter()
.flat_map(|(hash, r)| r.logs.into_iter().map(|l| (hash.clone(), l)).collect::<Vec<(H256, LogEntry)>>())
.collect::<Vec<(H256, LogEntry)>>();
let result = pending_logs.into_iter()
.filter(|pair| filter.matches(&pair.1))
.map(|pair| {
let mut log = Log::from(pair.1);
log.transaction_hash = Some(pair.0.into());
log
})
.collect();
result
}
fn check_known<C>(client: &C, number: BlockNumber) -> Result<()> where C: MiningBlockChainClient {
use ethcore::block_status::BlockStatus;
match client.block_status(number.into()) {
BlockStatus::InChain => Ok(()),
BlockStatus::Pending => Ok(()),
_ => Err(errors::unknown_block()),
}
}
const MAX_QUEUE_SIZE_TO_MINE_ON: usize = 4; // because uncles go back 6.
impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
C: MiningBlockChainClient + 'static,
SN: SnapshotService + 'static,
S: SyncProvider + 'static,
M: MinerService + 'static,
EM: ExternalMinerService + 'static,
{
type Metadata = Metadata;
fn protocol_version(&self) -> Result<String> {
let version = self.sync.status().protocol_version.to_owned();
Ok(format!("{}", version))
}
fn syncing(&self) -> Result<SyncStatus> {
use ethcore::snapshot::RestorationStatus;
let status = self.sync.status();
let client = &self.client;
let snapshot_status = self.snapshot.status();
let (warping, warp_chunks_amount, warp_chunks_processed) = match snapshot_status {
RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } =>
(true, Some(block_chunks + state_chunks), Some(block_chunks_done + state_chunks_done)),
_ => (false, None, None),
};
if warping || is_major_importing(Some(status.state), client.queue_info()) {
let chain_info = client.chain_info();
let current_block = U256::from(chain_info.best_block_number);
let highest_block = U256::from(status.highest_block_number.unwrap_or(status.start_block_number));
let info = SyncInfo {
starting_block: status.start_block_number.into(),
current_block: current_block.into(),
highest_block: highest_block.into(),
warp_chunks_amount: warp_chunks_amount.map(|x| U256::from(x as u64)).map(Into::into),
warp_chunks_processed: warp_chunks_processed.map(|x| U256::from(x as u64)).map(Into::into),
};
Ok(SyncStatus::Info(info))
} else {
Ok(SyncStatus::None)
}
}
fn author(&self, meta: Metadata) -> Result<RpcH160> {
let dapp = meta.dapp_id();
let mut miner = self.miner.author();
if miner == 0.into() {
miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default();
}
Ok(RpcH160::from(miner))
}
fn is_mining(&self) -> Result<bool> {
Ok(self.miner.is_currently_sealing())
}
fn hashrate(&self) -> Result<RpcU256> {
Ok(RpcU256::from(self.external_miner.hashrate()))
}
fn gas_price(&self) -> Result<RpcU256> {
Ok(RpcU256::from(default_gas_price(&*self.client, &*self.miner)))
}
fn accounts(&self, meta: Metadata) -> Result<Vec<RpcH160>> {
let dapp = meta.dapp_id();
let accounts = self.dapp_accounts(dapp.into())?;
Ok(accounts.into_iter().map(Into::into).collect())
}
fn block_number(&self) -> Result<RpcU256> {
Ok(RpcU256::from(self.client.chain_info().best_block_number))
}
fn balance(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256> {
let address = address.into();
let id = num.unwrap_or_default();
try_bf!(check_known(&*self.client, id.clone()));
let res = match self.client.balance(&address, id.into()) {
Some(balance) => Ok(balance.into()),
None => Err(errors::state_pruned()),
};
Box::new(future::done(res))
}
fn storage_at(&self, address: RpcH160, pos: RpcU256, num: Trailing<BlockNumber>) -> BoxFuture<RpcH256> {
let address: Address = RpcH160::into(address);
let position: U256 = RpcU256::into(pos);
let id = num.unwrap_or_default();
try_bf!(check_known(&*self.client, id.clone()));
let res = match self.client.storage_at(&address, &H256::from(position), id.into()) {
Some(s) => Ok(s.into()),
None => Err(errors::state_pruned()),
};
Box::new(future::done(res))
}
fn transaction_count(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256> {
let address: Address = RpcH160::into(address);
let res = match num.unwrap_or_default() {
BlockNumber::Pending if self.options.pending_nonce_from_queue => {
let nonce = self.miner.last_nonce(&address)
.map(|n| n + 1.into())
.or_else(|| self.client.nonce(&address, BlockNumber::Pending.into()));
match nonce {
Some(nonce) => Ok(nonce.into()),
None => Err(errors::database("latest nonce missing"))
}
}
id => {
try_bf!(check_known(&*self.client, id.clone()));
match self.client.nonce(&address, id.into()) {
Some(nonce) => Ok(nonce.into()),
None => Err(errors::state_pruned()),
}
}
};
Box::new(future::done(res))
}
fn block_transaction_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>> {
Box::new(future::ok(self.client.block(BlockId::Hash(hash.into()))
.map(|block| block.transactions_count().into())))
}
fn block_transaction_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>> {
Box::new(future::ok(match num {
BlockNumber::Pending => Some(
self.miner.status().transactions_in_pending_block.into()
),
_ =>
self.client.block(num.into())
.map(|block| block.transactions_count().into())
}))
}
fn block_uncles_count_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<RpcU256>> {
Box::new(future::ok(self.client.block(BlockId::Hash(hash.into()))
.map(|block| block.uncles_count().into())))
}
fn block_uncles_count_by_number(&self, num: BlockNumber) -> BoxFuture<Option<RpcU256>> {
Box::new(future::ok(match num {
BlockNumber::Pending => Some(0.into()),
_ => self.client.block(num.into())
.map(|block| block.uncles_count().into()
),
}))
}
fn code_at(&self, address: RpcH160, num: Trailing<BlockNumber>) -> BoxFuture<Bytes> {
let address: Address = RpcH160::into(address);
let id = num.unwrap_or_default();
try_bf!(check_known(&*self.client, id.clone()));
let res = match self.client.code(&address, id.into()) {
Some(code) => Ok(code.map_or_else(Bytes::default, Bytes::new)),
None => Err(errors::state_pruned()),
};
Box::new(future::done(res))
}
fn block_by_hash(&self, hash: RpcH256, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
Box::new(future::done(self.block(BlockId::Hash(hash.into()), include_txs)))
}
fn block_by_number(&self, num: BlockNumber, include_txs: bool) -> BoxFuture<Option<RichBlock>> {
Box::new(future::done(self.block(num.into(), include_txs)))
}
fn transaction_by_hash(&self, hash: RpcH256) -> BoxFuture<Option<Transaction>> {
let hash: H256 = hash.into();
let block_number = self.client.chain_info().best_block_number;
let tx = try_bf!(self.transaction(TransactionId::Hash(hash))).or_else(|| {
self.miner.transaction(block_number, &hash)
.map(|t| Transaction::from_pending(t, block_number, self.eip86_transition))
});
Box::new(future::ok(tx))
}
fn transaction_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> BoxFuture<Option<Transaction>> {
Box::new(future::done(
self.transaction(TransactionId::Location(BlockId::Hash(hash.into()), index.value()))
))
}
fn transaction_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture<Option<Transaction>> {
Box::new(future::done(
self.transaction(TransactionId::Location(num.into(), index.value()))
))
}
fn transaction_receipt(&self, hash: RpcH256) -> BoxFuture<Option<Receipt>> {
let best_block = self.client.chain_info().best_block_number;
let hash: H256 = hash.into();
match (self.miner.pending_receipt(best_block, &hash), self.options.allow_pending_receipt_query) {
(Some(receipt), true) => Box::new(future::ok(Some(receipt.into()))),
_ => {
let receipt = self.client.transaction_receipt(TransactionId::Hash(hash));
Box::new(future::ok(receipt.map(Into::into)))
}
}
}
fn uncle_by_block_hash_and_index(&self, hash: RpcH256, index: Index) -> BoxFuture<Option<RichBlock>> {
Box::new(future::done(self.uncle(UncleId {
block: BlockId::Hash(hash.into()),
position: index.value()
})))
}
fn uncle_by_block_number_and_index(&self, num: BlockNumber, index: Index) -> BoxFuture<Option<RichBlock>> {
Box::new(future::done(self.uncle(UncleId {
block: num.into(),
position: index.value()
})))
}
fn compilers(&self) -> Result<Vec<String>> {
Err(errors::deprecated("Compilation functionality is deprecated.".to_string()))
}
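// getLogs handler: chain logs are collected first; if the filter's to_block is "pending", logs from the
// miner's pending block are appended before the optional result limit is applied.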
fn logs(&self, filter: Filter) -> BoxFuture<Vec<Log>> {
let include_pending = filter.to_block == Some(BlockNumber::Pending);
let filter: EthcoreFilter = filter.into();
let mut logs = self.client.logs(filter.clone())
.into_iter()
.map(From::from)
.collect::<Vec<Log>>();
if include_pending {
let best_block = self.client.chain_info().best_block_number;
let pending = pending_logs(&*self.miner, best_block, &filter);
logs.extend(pending);
}
let logs = limit_logs(logs, filter.limit);
Box::new(future::ok(logs))
}
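// getWork handler: refuses when the engine seals internally or no author is configured, returns an error
// while the block queue is still large (node likely syncing), and otherwise hands out the current
// (pow_hash, seed_hash, target) work package.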
fn work(&self, no_new_work_timeout: Trailing<u64>) -> Result<Work> {
if !self.miner.can_produce_work_package() {
warn!(target: "miner", "Cannot give work package - engine seals internally.");
return Err(errors::no_work_required())
}
let no_new_work_timeout = no_new_work_timeout.unwrap_or_default();
// check if we're still syncing and refuse to hand out work in that case
{
//TODO: check if initial sync is complete here
//let sync = self.sync;
if /*sync.status().state != SyncState::Idle ||*/ self.client.queue_info().total_queue_size() > MAX_QUEUE_SIZE_TO_MINE_ON {
trace!(target: "miner", "Syncing. Cannot give any work.");
return Err(errors::no_work());
}
// Otherwise spin until our submitted block has been included.
let timeout = Instant::now() + Duration::from_millis(1000);
while Instant::now() < timeout && self.client.queue_info().total_queue_size() > 0 {
thread::sleep(Duration::from_millis(1));
}
}
if self.miner.author().is_zero() {
warn!(target: "miner", "Cannot give work package - no author is configured. Use --author to configure!");
return Err(errors::no_author())
}
self.miner.map_sealing_work(&*self.client, |b| {
let pow_hash = b.hash();
let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
let seed_hash = self.seed_compute.lock().hash_block_number(b.block().header().number());
if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 {
Err(errors::no_new_work())
} else if self.options.send_block_number_in_get_work {
let block_number = b.block().header().number();
Ok(Work {
pow_hash: pow_hash.into(),
seed_hash: seed_hash.into(),
target: target.into(),
number: Some(block_number),
})
} else {
Ok(Work {
pow_hash: pow_hash.into(),
seed_hash: seed_hash.into(),
target: target.into(),
number: None
})
}
}).unwrap_or(Err(errors::internal("No work found.", "")))
}
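// submitWork handler: the seal passed to the miner is the RLP-encoded mix hash followed by the
// RLP-encoded nonce; the boolean result reports whether the miner accepted the seal.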
fn submit_work(&self, nonce: RpcH64, pow_hash: RpcH256, mix_hash: RpcH256) -> Result<bool> {
if !self.miner.can_produce_work_package() {
warn!(target: "miner", "Cannot submit work - engine seals internally.");
return Err(errors::no_work_required())
}
let nonce: H64 = nonce.into();
let pow_hash: H256 = pow_hash.into();
let mix_hash: H256 = mix_hash.into();
trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
let seal = vec![rlp::encode(&mix_hash).into_vec(), rlp::encode(&nonce).into_vec()];
Ok(self.miner.submit_seal(&*self.client, pow_hash, seal).is_ok())
}
fn submit_hashrate(&self, rate: RpcU256, id: RpcH256) -> Result<bool> {
self.external_miner.submit_hashrate(rate.into(), id.into());
Ok(true)
}
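// sendRawTransaction handler: decode the RLP payload, verify the signature while constructing the
// SignedTransaction, then hand it to the dispatcher and return the transaction hash on success.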
fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256> {
UntrustedRlp::new(&raw.into_vec()).as_val()
.map_err(errors::rlp)
.and_then(|tx| SignedTransaction::new(tx).map_err(errors::transaction))
.and_then(|signed_transaction| {
FullDispatcher::dispatch_transaction(
&*self.client,
&*self.miner,
signed_transaction.into(),
)
})
.map(Into::into)
}
fn submit_transaction(&self, raw: Bytes) -> Result<RpcH256> {
self.send_raw_transaction(raw)
}
fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<Bytes> {
let request = CallRequest::into(request);
let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp()));
let num = num.unwrap_or_default();
let result = self.client.call(&signed, Default::default(), num.into());
Box::new(future::done(result
.map(|b| b.output.into())
.map_err(errors::call)
))
}
fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing<BlockNumber>) -> BoxFuture<RpcU256> {
let request = CallRequest::into(request);
let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp()));
Box::new(future::done(self.client.estimate_gas(&signed, num.unwrap_or_default().into())
.map(Into::into)
.map_err(errors::call)
))
}
fn compile_lll(&self, _: String) -> Result<Bytes> {
Err(errors::deprecated("Compilation of LLL via RPC is deprecated".to_string()))
}
fn compile_serpent(&self, _: String) -> Result<Bytes> {
Err(errors::deprecated("Compilation of Serpent via RPC is deprecated".to_string()))
}
fn compile_solidity(&self, _: String) -> Result<Bytes> {
Err(errors::deprecated("Compilation of Solidity via RPC is deprecated".to_string()))
}
}<|fim▁end|> | seed_compute: Mutex<SeedHashCompute>, |
<|file_name|>PSBuildReferenceTaxonomyUtils.java<|end_file_name|><|fim▁begin|>package org.broadinstitute.hellbender.tools.spark.pathseq;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Output;
import htsjdk.samtools.SAMSequenceRecord;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.broadinstitute.hellbender.exceptions.UserException;
import org.broadinstitute.hellbender.utils.io.IOUtils;
import scala.Tuple2;
import java.io.*;
import java.util.*;
import java.util.zip.GZIPInputStream;
public final class PSBuildReferenceTaxonomyUtils {
protected static final Logger logger = LogManager.getLogger(PSBuildReferenceTaxonomyUtils.class);
private static final String VERTICAL_BAR_DELIMITER_REGEX = "\\s*\\|\\s*";
/**
* Build set of accessions contained in the reference.
* Returns: a map from accession to the name and length of the record. If the sequence name contains the
* taxonomic ID, it instead gets added to taxIdToProperties. Later we merge both results into taxIdToProperties.
* Method: First, look for either "taxid|<taxid>|" or "ref|<accession>|" in the sequence name. If neither of
* those are found, use the first word of the name as the accession.
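* For example, a hypothetical record name "gi|9626243|ref|NC_001416.1| Enterobacteria phage lambda" yields
* accession "NC_001416.1", while "chr1|taxid|9606| some description" is assigned directly to taxon 9606.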
*/
protected static Map<String, Tuple2<String, Long>> parseReferenceRecords(final List<SAMSequenceRecord> dictionaryList,
final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties) {
final Map<String, Tuple2<String, Long>> accessionToNameAndLength = new HashMap<>();
for (final SAMSequenceRecord record : dictionaryList) {
final String recordName = record.getSequenceName();
final long recordLength = record.getSequenceLength();
final String[] tokens = recordName.split(VERTICAL_BAR_DELIMITER_REGEX);
String recordAccession = null;
int recordTaxId = PSTree.NULL_NODE;
for (int i = 0; i < tokens.length - 1 && recordTaxId == PSTree.NULL_NODE; i++) {
if (tokens[i].equals("ref")) {
recordAccession = tokens[i + 1];
} else if (tokens[i].equals("taxid")) {
recordTaxId = parseTaxonId(tokens[i + 1]);
}
}
if (recordTaxId == PSTree.NULL_NODE) {
if (recordAccession == null) {
final String[] tokens2 = tokens[0].split(" "); //Default accession to first word in the name
recordAccession = tokens2[0];
}
accessionToNameAndLength.put(recordAccession, new Tuple2<>(recordName, recordLength));
} else {
addReferenceAccessionToTaxon(recordTaxId, recordName, recordLength, taxIdToProperties);
}
}
return accessionToNameAndLength;
}
private static int parseTaxonId(final String taxonId) {
try {
return Integer.valueOf(taxonId);
} catch (final NumberFormatException e) {
throw new UserException.BadInput("Expected taxonomy ID to be an integer but found \"" + taxonId + "\"", e);
}
}
/**
* Helper classes for defining RefSeq and GenBank catalog formats. Columns should be given as 0-based indices.
*/
private interface AccessionCatalogFormat {
int getTaxIdColumn();
int getAccessionColumn();
}
private static final class RefSeqCatalogFormat implements AccessionCatalogFormat {
private static final int TAX_ID_COLUMN = 0;
private static final int ACCESSION_COLUMN = 2;
public int getTaxIdColumn() {
return TAX_ID_COLUMN;
}
public int getAccessionColumn() {
return ACCESSION_COLUMN;
}
}
private static final class GenBankCatalogFormat implements AccessionCatalogFormat {
private static final int TAX_ID_COLUMN = 6;
private static final int ACCESSION_COLUMN = 1;
public int getTaxIdColumn() {
return TAX_ID_COLUMN;
}
public int getAccessionColumn() {
return ACCESSION_COLUMN;
}
}
<|fim▁hole|> * Builds maps of reference contig accessions to their taxonomic ids and vice versa.
* Input can be a RefSeq or Genbank catalog file. accNotFound is an initial list of
* accessions from the reference that have not been successfully looked up; if null,
* will be initialized to the accToRefInfo key set by default.
* <p>
* Returns a collection of reference accessions that could not be found, if any.
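* As an illustration, a hypothetical RefSeq catalog row of the form "562[tab]Escherichia coli[tab]NC_000913.3[tab]..."
* maps accession NC_000913.3 to taxon 562 (RefSeq: columns 0 and 2; GenBank: columns 6 and 1).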
*/
protected static Set<String> parseCatalog(final BufferedReader reader,
final Map<String, Tuple2<String, Long>> accessionToNameAndLength,
final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties,
final boolean bGenBank,
final Set<String> accessionsNotFoundIn) {
final Set<String> accessionsNotFoundOut;
try {
String line;
final AccessionCatalogFormat catalogFormat = bGenBank ? new GenBankCatalogFormat() : new RefSeqCatalogFormat();
final int taxIdColumnIndex = catalogFormat.getTaxIdColumn();
final int accessionColumnIndex = catalogFormat.getAccessionColumn();
if (accessionsNotFoundIn == null) {
//If accessionsNotFoundIn is null, this is the first call to parseCatalog, so initialize the set to all accessions
accessionsNotFoundOut = new HashSet<>(accessionToNameAndLength.keySet());
} else {
//Otherwise this is a subsequent call and we continue to look for any remaining accessions
accessionsNotFoundOut = new HashSet<>(accessionsNotFoundIn);
}
final int minColumns = Math.max(taxIdColumnIndex, accessionColumnIndex) + 1;
long lineNumber = 1;
while ((line = reader.readLine()) != null && !line.isEmpty()) {
final String[] tokens = line.trim().split("\t", minColumns + 1);
if (tokens.length >= minColumns) {
final int taxId = parseTaxonId(tokens[taxIdColumnIndex]);
final String accession = tokens[accessionColumnIndex];
if (accessionToNameAndLength.containsKey(accession)) {
final Tuple2<String, Long> nameAndLength = accessionToNameAndLength.get(accession);
addReferenceAccessionToTaxon(taxId, nameAndLength._1, nameAndLength._2, taxIdToProperties);
accessionsNotFoundOut.remove(accession);
}
} else {
throw new UserException.BadInput("Expected at least " + minColumns + " tab-delimited columns in " +
"GenBank catalog file, but only found " + tokens.length + " on line " + lineNumber);
}
lineNumber++;
}
} catch (final IOException e) {
throw new UserException.CouldNotReadInputFile("Error reading from catalog file", e);
}
return accessionsNotFoundOut;
}
/**
* Parses scientific name of each taxon and puts it in taxIdToProperties
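* Input follows the NCBI names.dmp layout; an illustrative row "9606 | Homo sapiens |  | scientific name |"
* sets the name of taxon 9606, while rows whose name type is not "scientific name" are ignored.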
*/
protected static void parseNamesFile(final BufferedReader reader, final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties) {
try {
String line;
while ((line = reader.readLine()) != null) {
//Split into columns delimited by <TAB>|<TAB>
final String[] tokens = line.split(VERTICAL_BAR_DELIMITER_REGEX);
if (tokens.length < 4) {
throw new UserException.BadInput("Expected at least 4 columns in tax dump names file but found " + tokens.length);
}
final String nameType = tokens[3];
if (nameType.equals("scientific name")) {
final int taxId = parseTaxonId(tokens[0]);
final String name = tokens[1];
if (taxIdToProperties.containsKey(taxId)) {
taxIdToProperties.get(taxId).setName(name);
} else {
taxIdToProperties.put(taxId, new PSPathogenReferenceTaxonProperties(name));
}
}
}
} catch (final IOException e) {
throw new UserException.CouldNotReadInputFile("Error reading from taxonomy dump names file", e);
}
}
/**
* Gets the rank and parent of each taxon.
* Returns a Collection of tax ID's found in the nodes file that are not in taxIdToProperties (i.e. were not found in
* a reference sequence name using the taxid|\<taxid\> tag or the catalog file).
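* Input follows the NCBI nodes.dmp layout; an illustrative row "9606 | 9605 | species | ..." records 9605
* as the parent of taxon 9606 with rank "species" (the root node keeps its null parent).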
*/
protected static Collection<Integer> parseNodesFile(final BufferedReader reader, final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties) {
try {
final Collection<Integer> taxIdsNotFound = new ArrayList<>();
String line;
while ((line = reader.readLine()) != null) {
final String[] tokens = line.split(VERTICAL_BAR_DELIMITER_REGEX);
if (tokens.length < 3) {
throw new UserException.BadInput("Expected at least 3 columns in tax dump nodes file but found " + tokens.length);
}
final int taxId = parseTaxonId(tokens[0]);
final int parent = parseTaxonId(tokens[1]);
final String rank = tokens[2];
final PSPathogenReferenceTaxonProperties taxonProperties;
if (taxIdToProperties.containsKey(taxId)) {
taxonProperties = taxIdToProperties.get(taxId);
} else {
taxonProperties = new PSPathogenReferenceTaxonProperties("tax_" + taxId);
taxIdsNotFound.add(taxId);
}
taxonProperties.setRank(rank);
if (taxId != PSTaxonomyConstants.ROOT_ID) { //keep root's parent set to null
taxonProperties.setParent(parent);
}
taxIdToProperties.put(taxId, taxonProperties);
}
return taxIdsNotFound;
} catch (final IOException e) {
throw new UserException.CouldNotReadInputFile("Error reading from taxonomy dump nodes file", e);
}
}
/**
* Helper function for building the map from tax id to reference contig accession
*/
private static void addReferenceAccessionToTaxon(final int taxId, final String accession, final long length, final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties) {
taxIdToProperties.putIfAbsent(taxId, new PSPathogenReferenceTaxonProperties());
taxIdToProperties.get(taxId).addAccession(accession, length);
}
/**
* Removes nodes not in the tree from the tax_id-to-properties map
*/
static void removeUnusedTaxIds(final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties,
final PSTree tree) {
taxIdToProperties.keySet().retainAll(tree.getNodeIDs());
}
/**
* Create reference_name-to-taxid map (just an inversion on taxIdToProperties)
*/
protected static Map<String, Integer> buildAccessionToTaxIdMap(final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties,
final PSTree tree,
final int minNonVirusContigLength) {
final Map<String, Integer> accessionToTaxId = new HashMap<>();
for (final int taxId : taxIdToProperties.keySet()) {
final boolean isVirus = tree.getPathOf(taxId).contains(PSTaxonomyConstants.VIRUS_ID);
final PSPathogenReferenceTaxonProperties taxonProperties = taxIdToProperties.get(taxId);
for (final String name : taxonProperties.getAccessions()) {
if (isVirus || taxonProperties.getAccessionLength(name) >= minNonVirusContigLength) {
accessionToTaxId.put(name, taxId);
}
}
}
return accessionToTaxId;
}
/**
* Returns a PSTree representing a reduced taxonomic tree containing only taxa present in the reference
*/
protected static PSTree buildTaxonomicTree(final Map<Integer, PSPathogenReferenceTaxonProperties> taxIdToProperties) {
//Build tree of all taxa
final PSTree tree = new PSTree(PSTaxonomyConstants.ROOT_ID);
final Collection<Integer> invalidIds = new HashSet<>(taxIdToProperties.size());
for (final int taxId : taxIdToProperties.keySet()) {
if (taxId != PSTaxonomyConstants.ROOT_ID) {
final PSPathogenReferenceTaxonProperties taxonProperties = taxIdToProperties.get(taxId);
if (taxonProperties.getName() != null && taxonProperties.getParent() != PSTree.NULL_NODE && taxonProperties.getRank() != null) {
tree.addNode(taxId, taxonProperties.getName(), taxonProperties.getParent(), taxonProperties.getTotalLength(), taxonProperties.getRank());
} else {
invalidIds.add(taxId);
}
}
}
PSUtils.logItemizedWarning(logger, invalidIds, "The following taxonomic IDs did not have name/taxonomy information (this may happen when the catalog and taxdump files are inconsistent)");
final Set<Integer> unreachableNodes = tree.removeUnreachableNodes();
if (!unreachableNodes.isEmpty()) {
PSUtils.logItemizedWarning(logger, unreachableNodes, "Removed " + unreachableNodes.size() + " unreachable tree nodes");
}
tree.checkStructure();
//Trim tree down to nodes corresponding only to reference taxa (and their ancestors)
final Set<Integer> relevantNodes = new HashSet<>();
for (final int taxonId : taxIdToProperties.keySet()) {
if (!taxIdToProperties.get(taxonId).getAccessions().isEmpty() && tree.hasNode(taxonId)) {
relevantNodes.addAll(tree.getPathOf(taxonId));
}
}
if (relevantNodes.isEmpty()) {
throw new UserException.BadInput("Did not find any taxa corresponding to reference sequence names.\n\n"
+ "Check that reference names follow one of the required formats:\n\n"
+ "\t...|ref|<accession.version>|...\n"
+ "\t...|taxid|<taxonomy_id>|...\n"
+ "\t<accession.version><mask>...");
}
tree.retainNodes(relevantNodes);
return tree;
}
/**
* Gets a buffered reader for a gzipped file
* @param path File path
* @return Reader for the file
*/
public static BufferedReader getBufferedReaderGz(final String path) {
try {
return new BufferedReader(IOUtils.makeReaderMaybeGzipped(new File(path)));
} catch (final IOException e) {
throw new UserException.BadInput("Could not open file " + path, e);
}
}
/**
* Gets a Reader for a file in a gzipped tarball
* @param tarPath Path to the tarball
* @param fileName File within the tarball
* @return The file's reader
*/
public static BufferedReader getBufferedReaderTarGz(final String tarPath, final String fileName) {
try {
InputStream result = null;
final TarArchiveInputStream tarStream = new TarArchiveInputStream(new GZIPInputStream(new FileInputStream(tarPath)));
TarArchiveEntry entry = tarStream.getNextTarEntry();
while (entry != null) {
if (entry.getName().equals(fileName)) {
result = tarStream;
break;
}
entry = tarStream.getNextTarEntry();
}
if (result == null) {
throw new UserException.BadInput("Could not find file " + fileName + " in tarball " + tarPath);
}
return new BufferedReader(new InputStreamReader(result));
} catch (final IOException e) {
throw new UserException.BadInput("Could not open compressed tarball file " + fileName + " in " + tarPath, e);
}
}
/**
* Writes objects using Kryo to specified local file path.
* NOTE: using setReferences(false), which must also be set when reading the file. Does not work with nested
* objects that reference its parent.
*/
public static void writeTaxonomyDatabase(final String filePath, final PSTaxonomyDatabase taxonomyDatabase) {
try {
final Kryo kryo = new Kryo();
kryo.setReferences(false);
Output output = new Output(new FileOutputStream(filePath));
kryo.writeObject(output, taxonomyDatabase);
output.close();
} catch (final FileNotFoundException e) {
throw new UserException.CouldNotCreateOutputFile("Could not serialize objects to file", e);
}
}
}<|fim▁end|> | /** |
<|file_name|>wss.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
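# Minimal Python 2 WebSocket push server: performs the RFC 6455 handshake by hand and replays lines
# from an event log to every connected map client, one frame per event.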
import socket
import re
import binascii
import struct
import time
import sys
import random
from base64 import b64encode
from hashlib import sha1
from thread import *
events = "/var/www/map/eventstream"
with open(events) as f:
content = f.read().splitlines()
f.close()
websocket_answer = (
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: {key}\r\n\r\n',
)
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
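# RFC 6455 handshake GUID: Sec-WebSocket-Accept is base64(SHA-1(client key + GUID)), computed below.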
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket Created'
try:
s.bind(('192.168.1.101', 443))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
s.listen(10)
print "Listening for connections"
def clientthread(client):
while True:
for line in content:<|fim▁hole|> preamble = "\x81\x7e" + struct.pack(">i", length)[2:]
client.send(preamble+line)
print "Sending Attack Event Size: " + hex(length) + " Bytes\n"
random.seed()
n = random.random()
time.sleep(n)
client.close()
while 1:
client, address = s.accept()
print 'Got connection from', address
text = client.recv(1024)
print text
key = (re.search('Sec-WebSocket-Key:\s+(.*?)[\n\r]+', text)
.groups()[0]
.strip())
response_key = b64encode(sha1(key + GUID).digest())
response = '\r\n'.join(websocket_answer).format(key=response_key)
print response
client.send(response)
client.recv(1)
start_new_thread(clientthread, (client,))
s.close()<|fim▁end|> | length = len(line) |
<|file_name|>bitcoin_es_DO.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="es_DO" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+39"/>
<source><b>DarkChain</b> version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2014 The DarkChain developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Este es un software experimental.
Distribuido bajo la licencia MIT/X11, vea el archivo adjunto
COPYING o http://www.opensource.org/licenses/mit-license.php.
Este producto incluye software desarrollado por OpenSSL Project para su uso en
el OpenSSL Toolkit (http://www.openssl.org/) y software criptográfico escrito por
Eric Young ([email protected]) y el software UPnP escrito por Thomas Bernard.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Haga doble clic para editar una dirección o etiqueta</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Crear una nueva dirección</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copiar la dirección seleccionada al portapapeles del sistema</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-46"/>
<source>These are your DarkChain addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Copiar dirección</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Borrar de la lista la dirección seleccionada</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Eliminar</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>Copiar &etiqueta</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>&Editar</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivos de columnas separadas por coma (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Diálogo de contraseña</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Introducir contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nueva contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Repita la nueva contraseña</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Introduzca la nueva contraseña de la cartera.<br/>Por favor elija una con <b>10 o más caracteres aleatorios</b>, u <b>ocho o más palabras</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Cifrar la cartera</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Esta operación requiere su contraseña para desbloquear la cartera</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Desbloquear cartera</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Esta operación requiere su contraseña para descifrar la cartera.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Descifrar la certare</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Cambiar contraseña</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Introduzca la contraseña anterior de la cartera y la nueva. </translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmar cifrado de la cartera</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>¿Seguro que desea cifrar su monedero?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANTE: Cualquier copia de seguridad que haya realizado previamente de su archivo de monedero debe reemplazarse con el nuevo archivo de monedero cifrado. Por razones de seguridad, las copias de seguridad previas del archivo de monedero no cifradas serán inservibles en cuanto comience a usar el nuevo monedero cifrado.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Aviso: ¡La tecla de bloqueo de mayúsculas está activada!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Monedero cifrado</translation>
</message>
<message>
<location line="-58"/>
<source>DarkChain will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Ha fallado el cifrado del monedero</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Ha fallado el cifrado del monedero debido a un error interno. El monedero no ha sido cifrado.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Las contraseñas no coinciden.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Ha fallado el desbloqueo del monedero</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La contraseña introducida para descifrar el monedero es incorrecta.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Ha fallado el descifrado del monedero</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Se ha cambiado correctamente la contraseña del monedero.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+282"/>
<source>Sign &message...</source>
<translation>Firmar &mensaje...</translation>
</message>
<message>
<location line="+251"/>
<source>Synchronizing with network...</source>
<translation>Sincronizando con la red…</translation>
</message>
<message>
<location line="-319"/>
<source>&Overview</source>
<translation>&Vista general</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Mostrar vista general del monedero</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transacciones</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Examinar el historial de transacciones</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>&Salir</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Salir de la aplicación</translation>
</message>
<message>
<location line="+6"/>
<source>Show information about DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Acerca de &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Mostrar información acerca de Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opciones...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Cifrar monedero…</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>Copia de &respaldo del monedero...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Cambiar la contraseña…</translation>
</message>
<message numerus="yes">
<location line="+259"/>
<source>~%n block(s) remaining</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-256"/>
<source>&Export...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-64"/>
<source>Send coins to a DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Modify configuration options for DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Copia de seguridad del monedero en otra ubicación</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambiar la contraseña utilizada para el cifrado del monedero</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>Ventana de &depuración</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Abrir la consola de depuración y diagnóstico</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Verificar mensaje...</translation>
</message>
<message>
<location line="-202"/>
<source>DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+180"/>
<source>&About DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>Mo&strar/ocultar</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&Archivo</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Configuración</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>A&yuda</translation>
</message>
<message>
<location line="+12"/>
<source>Tabs toolbar</source>
<translation>Barra de pestañas</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>DarkChain client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+75"/>
<source>%n active connection(s) to DarkChain network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="-312"/>
<source>About DarkChain card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about DarkChain card</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+297"/>
<source>%n minute(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Actualizado</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Actualizando...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Transacción enviada</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Transacción entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Fecha: %1
Cantidad: %2
Tipo: %3
Dirección: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid DarkChain address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>desbloqueado</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>bloqueado</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. DarkChain can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Alerta de red</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Cuantía:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Tasa:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Envío pequeño:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>no</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>Después de tasas:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>Cambio:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>(des)selecciona todos</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>Modo arbol</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>Modo lista</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>Confirmaciones</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>Prioridad</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Copiar identificador de transacción</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>Copiar donación</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Copiar después de aplicar donación</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Copiar envío pequeño</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>lo más alto</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>alto</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>medio-alto</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>medio</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>bajo-medio</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>bajo</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>lo más bajo</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>si</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>Enviar desde %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(cambio)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Editar Dirección</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etiqueta</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Dirección</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Nueva dirección de recepción</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nueva dirección de envío</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Editar dirección de recepción</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Editar dirección de envío</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>La dirección introducida "%1" ya está presente en la libreta de direcciones.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid DarkChain address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>No se pudo desbloquear el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Ha fallado la generación de la nueva clave.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>DarkChain-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opciones</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Principal</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Comisión de &transacciones</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start DarkChain after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Start DarkChain on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Red</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the DarkChain client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Mapear el puerto usando &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the DarkChain network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Dirección &IP del proxy:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Puerto:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Puerto del servidor proxy (ej. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&Versión SOCKS:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Versión del proxy SOCKS (ej. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Ventana</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Minimizar la ventana a la bandeja de iconos del sistema.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimizar a la bandeja en vez de a la barra de tareas</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimizar en lugar de salir de la aplicación al cerrar la ventana. Cuando esta opción está activa, la aplicación solo se puede cerrar seleccionando Salir desde el menú.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimizar al cerrar</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Interfaz</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>I&dioma de la interfaz de usuario</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting DarkChain.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Mostrar las cantidades en la &unidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Elegir la subdivisión predeterminada para mostrar cantidades en la interfaz y cuando se envían monedas.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show DarkChain addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Mostrar las direcciones en la lista de transacciones</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation>Mostrar o no características de control de moneda</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&Aceptar</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Cancelar</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>predeterminado</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting DarkChain.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>La dirección proxy indicada es inválida.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the DarkChain network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>No confirmado(s):</translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Su balance actual gastable</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>No disponible:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Saldo recién minado que aún no está disponible.</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>Total:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Su balance actual total</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Movimientos recientes</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>desincronizado</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nombre del cliente</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>N/D</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Versión del cliente</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Información</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Utilizando la versión OpenSSL</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Hora de inicio</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Red</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Número de conexiones</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Cadena de bloques</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Número actual de bloques</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Bloques totales estimados</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Hora del último bloque</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Abrir</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the DarkChain-Qt help message to get a list with possible DarkChain command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Consola</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Fecha de compilación</translation>
</message>
<message>
<location line="-104"/>
<source>DarkChain - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>DarkChain Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Archivo de registro de depuración</translation>
</message>
<message>
<location line="+7"/>
<source>Open the DarkChain debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Borrar consola</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the DarkChain RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Use las flechas arriba y abajo para navegar por el historial y <b>Control+L</b> para limpiar la pantalla.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Escriba <b>help</b> para ver un resumen de los comandos disponibles.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Enviar monedas</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>Características de control de la moneda</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>Entradas...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>Seleccionado automáticamente</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>¡Fondos insuficientes!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Cuantía:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 KEY</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioridad:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>Tasa:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Envío pequeño:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>Después de tasas:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Enviar a múltiples destinatarios de una vez</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Añadir &destinatario</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Limpiar &todo</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 KEY</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirmar el envío</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Enviar</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a DarkChain address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copiar cuantía</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>Copiar tasa</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Copiar después de tasas</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Copiar bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Copiar prioridad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Copiar envío pequeño</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Copiar cambio</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirmar el envío de monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>La dirección de recepción no es válida, compruébela de nuevo.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>La cantidad por pagar tiene que ser mayor de 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>La cantidad sobrepasa su saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>El total sobrepasa su saldo cuando se incluye la tasa de envío de %1.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Se ha encontrado una dirección duplicada. Solo se puede enviar a cada dirección una vez por operación de envío.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Ca&ntidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>&Pagar a:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Etiquete esta dirección para añadirla a la libreta</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Etiqueta:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a DarkChain address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Firmas - Firmar / verificar un mensaje</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>&Firmar mensaje</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Puede firmar mensajes con sus direcciones para demostrar que las posee. Tenga cuidado de no firmar cualquier cosa vaga, ya que los ataques de phishing pueden tratar de engañarle para suplantar su identidad. Firme solo declaraciones totalmente detalladas con las que usted esté de acuerdo.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Introduzca el mensaje que desea firmar aquí</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copiar la firma actual al portapapeles del sistema</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Limpiar todos los campos de la firma de mensaje</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Limpiar &todo</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Verificar mensaje</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Introduzca la dirección para la firma, el mensaje (asegurándose de copiar tal cual los saltos de línea, espacios, tabulaciones, etc.) y la firma a continuación para verificar el mensaje. Tenga cuidado de no asumir más información de lo que dice el propio mensaje firmado para evitar fraudes basados en ataques de tipo man-in-the-middle.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified DarkChain address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Limpiar todos los campos de la verificación de mensaje</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a DarkChain address (e.g. Sjz75uKHzUQJnSdzvpiigEGxseKkDhQToX)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Haga clic en "Firmar mensaje" para generar la firma</translation>
</message>
<message>
<location line="+3"/>
<source>Enter DarkChain signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>La dirección introducida es inválida.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Verifique la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>La dirección introducida no corresponde a una clave.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Se ha cancelado el desbloqueo del monedero.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>No se dispone de la clave privada para la dirección introducida.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Ha fallado la firma del mensaje.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Mensaje firmado.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>No se puede decodificar la firma.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Compruebe la firma e inténtelo de nuevo.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La firma no coincide con el resumen del mensaje.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>La verificación del mensaje ha fallado.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Mensaje verificado.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/fuera de línea</translation>
</message>
<message>
<location line="+2"/><|fim▁hole|> <location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmaciones</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Estado</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, transmitida a través de %n nodo</numerusform><numerusform>, transmitida a través de %n nodos</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Fuente</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generado</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>De</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Para</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>dirección propia</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>etiqueta</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Crédito</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>disponible en %n bloque más</numerusform><numerusform>disponible en %n bloques más</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>no aceptada</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Débito</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Comisión de transacción</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Cantidad neta</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Comentario</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Identificador de transacción</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Información de depuración</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transacción</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Entradas</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>verdadero</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falso</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, todavía no ha sido difundida satisfactoriamente</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>desconocido</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detalles de transacción</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Esta ventana muestra información detallada sobre la transacción</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmado (%1 confirmaciones)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto durante %n bloque más</numerusform><numerusform>Abierto durante %n bloques más</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>¡Este bloque no ha sido recibido por ningún otro nodo y probablemente no será aceptado!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generado pero no aceptado</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Recibidos de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pago propio</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/d)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Estado de transacción. Pasa el ratón sobre este campo para ver el número de confirmaciones.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Fecha y hora en que se recibió la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tipo de transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Dirección de destino de la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Cantidad retirada o añadida al saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Todo</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Hoy</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Esta semana</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Este mes</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Mes pasado</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Este año</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Rango...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>A usted mismo</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Otra</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Introduzca una dirección o etiqueta que buscar</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Cantidad mínima</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copiar cuantía</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Copiar identificador de transacción</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Editar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Mostrar detalles de la transacción</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivos de columnas separadas por coma (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Rango:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>a</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>DarkChain version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or DarkChaind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Muestra comandos
</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Recibir ayuda para un comando
</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Opciones:
</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: DarkChain.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: DarkChaind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>Especificar archivo de monedero (dentro del directorio de datos)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Especificar directorio para los datos</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Establecer el tamaño de caché de la base de datos en megabytes (predeterminado: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Mantener como máximo <n> conexiones a pares (predeterminado: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Conectar a un nodo para obtener direcciones de pares y desconectar</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Especifique su propia dirección pública</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Umbral para la desconexión de pares con mal comportamiento (predeterminado: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Número de segundos en que se evita la reconexión de pares con mal comportamiento (predeterminado: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escucha en IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Aceptar comandos consola y JSON-RPC
</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation type="unfinished"/>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Ejecutar en segundo plano como daemon y aceptar comandos
</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Usar la red de pruebas
</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Aceptar conexiones desde el exterior (predeterminado: 1 si no -proxy o -connect)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escuchar mediante IPv6. Recurriendo a IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Aviso: ¡-paytxfee tiene un valor muy alto! Esta es la comisión que pagará si envía una transacción.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong DarkChain will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Aviso: ¡Error al leer wallet.dat! Todas las claves se han leído correctamente, pero podrían faltar o ser incorrectos los datos de transacciones o las entradas de la libreta de direcciones.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Aviso: ¡Recuperados datos de wallet.dat corrupto! El wallet.dat original se ha guardado como wallet.{timestamp}.bak en %s; si hubiera errores en su saldo o transacciones, deberá restaurar una copia de seguridad.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Intento de recuperar claves privadas de un wallet.dat corrupto</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Opciones de creación de bloques:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Conectar sólo a los nodos (o nodo) especificados</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Descubrir dirección IP propia (predeterminado: 1 al escuchar sin -externalip)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Ha fallado la escucha en todos los puertos. Use -listen=0 si desea esto.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"/>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Búfer de recepción máximo por conexión, <n>*1000 bytes (predeterminado: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Búfer de envío máximo por conexión, <n>*1000 bytes (predeterminado: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Conectarse solo a nodos de la red <net> (IPv4, IPv6 o Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>Opciones SSL: (ver la Bitcoin Wiki para instrucciones de configuración SSL)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Enviar información de trazas/depuración a la consola en lugar de al archivo debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Establecer tamaño mínimo de bloque en bytes (predeterminado: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Reducir el archivo debug.log al iniciar el cliente (predeterminado: 1 sin -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Especificar el tiempo máximo de conexión en milisegundos (predeterminado: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 1 al escuchar)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>Nombre de usuario para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Aviso: ¡Esta versión es obsoleta, se necesita una actualización!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrupto. Ha fallado la recuperación.</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>Contraseña para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=DarkChainrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "DarkChain Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Permitir conexiones JSON-RPC desde la dirección IP especificada
</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Enviar comando al nodo situado en <ip> (predeterminado: 127.0.0.1)
</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Ejecutar un comando cuando cambia el mejor bloque (%s en cmd se sustituye por el hash de bloque)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Ejecutar comando cuando una transacción del monedero cambia (%s en cmd se remplazará por TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Actualizar el monedero al último formato</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Ajustar el número de claves en reserva a <n> (predeterminado: 100)
</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Volver a examinar la cadena de bloques en busca de transacciones del monedero perdidas</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Usar OpenSSL (https) para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Certificado del servidor (predeterminado: server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Clave privada del servidor (predeterminado: server.pem)
</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Este mensaje de ayuda
</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. DarkChain is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-98"/>
<source>DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>No es posible conectar con %s en este sistema (bind ha dado el error %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Permitir búsquedas DNS para -addnode, -seednode y -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Cargando direcciones...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Error al cargar wallet.dat: el monedero está dañado</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of DarkChain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart DarkChain to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Error al cargar wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Dirección -proxy inválida: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>La red especificada en -onlynet '%s' es desconocida</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Solicitada versión de proxy -socks desconocida: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>No se puede resolver la dirección de -bind: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>No se puede resolver la dirección de -externalip: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Cantidad inválida para -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Cuantía no válida</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Fondos insuficientes</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Cargando el índice de bloques...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Añadir un nodo al que conectarse y tratar de mantener la conexión abierta</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. DarkChain is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Cargando monedero...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>No se puede rebajar el monedero</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>No se puede escribir la dirección predeterminada</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Reexplorando...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Carga completa</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>Para utilizar la opción %s</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Tiene que establecer rpcpassword=<contraseña> en el fichero de configuración: ⏎
%s ⏎
Si el archivo no existe, créelo con permiso de lectura solamente del propietario.</translation>
</message>
</context>
</TS><|fim▁end|> | <source>%1/unconfirmed</source>
<translation>%1/no confirmado</translation>
</message>
<message> |
<|file_name|>InformixUniqueDelegate.java<|end_file_name|><|fim▁begin|>/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later.
* See the lgpl.txt file in the root directory or <http://www.gnu.org/licenses/lgpl-2.1.html>.
*/
package org.hibernate.dialect.unique;
import org.hibernate.boot.Metadata;
import org.hibernate.dialect.Dialect;
import org.hibernate.mapping.UniqueKey;
/**
* Informix requires the constraint name to come last on the alter table.
*
* @author Brett Meyer
*/
public class InformixUniqueDelegate extends DefaultUniqueDelegate {
public InformixUniqueDelegate( Dialect dialect ) {
super( dialect );
}
// legacy model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Override
public String getAlterTableToAddUniqueKeyCommand(UniqueKey uniqueKey, Metadata metadata) {
// Do this here, rather than allowing UniqueKey/Constraint to do it.
// We need full, simplified control over whether or not it happens.
final String tableName = metadata.getDatabase().getJdbcEnvironment().getQualifiedObjectNameFormatter().format(
uniqueKey.getTable().getQualifiedTableName(),
metadata.getDatabase().getJdbcEnvironment().getDialect()
);
final String constraintName = dialect.quote( uniqueKey.getName() );
return dialect.getAlterTableString( tableName )
+ " add constraint " + uniqueConstraintSql( uniqueKey ) + " constraint " + constraintName;
}<|fim▁hole|>
}<|fim▁end|> | |
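Editor's note: the Hibernate sample above turns on Informix's clause ordering — the constraint name trails the unique clause instead of following "add constraint". The Python sketch below only mimics that statement shape; the table, column, and constraint names are invented, and real statements come from Hibernate's own `uniqueConstraintSql` assembly.

```python
# Illustration only: on Informix the constraint name comes last,
# e.g. "alter table accounts add constraint unique (email) constraint uq_email".
def informix_add_unique(table, columns, name):
    cols = ", ".join(columns)
    return "alter table {} add constraint unique ({}) constraint {}".format(table, cols, name)


print(informix_add_unique("accounts", ["email"], "uq_email"))
```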
<|file_name|>mrp_production.py<|end_file_name|><|fim▁begin|># -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models
import math
<|fim▁hole|>
_inherit = 'mrp.production'
def _get_workorder_in_product_lines(
self, workcenter_lines, product_lines, properties=None):
super(MrpProduction, self)._get_workorder_in_product_lines(
workcenter_lines, product_lines, properties=properties)
for workorder in workcenter_lines:
wc = workorder.routing_wc_line
cycle = wc.cycle_nbr and (self.product_qty / wc.cycle_nbr) or 0
if self.company_id.complete_cycle:
cycle = int(math.ceil(cycle))
workorder.cycle = cycle
workorder.hour = wc.hour_nbr * cycle<|fim▁end|> |
class MrpProduction(models.Model): |
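Editor's note: a minimal, standalone sketch of the cycle/hour arithmetic in the Odoo override above. The quantities and workcenter figures are invented for illustration; the real method reads them from `routing_wc_line` and the company's `complete_cycle` flag.

```python
import math


# Sketch only: cycles = qty / qty-per-cycle, rounded up to whole cycles when
# partial cycles count as full ones; hours = hours-per-cycle * cycles.
def workorder_load(product_qty, cycle_nbr, hour_nbr, complete_cycle=True):
    cycle = (product_qty / cycle_nbr) if cycle_nbr else 0
    if complete_cycle:
        cycle = int(math.ceil(cycle))
    return cycle, hour_nbr * cycle


# 250 units on a workcenter doing 40 per cycle at 1.5 h/cycle -> (7, 10.5)
print(workorder_load(250.0, 40.0, 1.5))
```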
<|file_name|>photo_loader_tests.py<|end_file_name|><|fim▁begin|># coding=utf-8
from elections.tests import VotaInteligenteTestCase as TestCase
from elections.models import Election
from django.core.urlresolvers import reverse
from candideitorg.models import Candidate
from django.core.management import call_command
class PhotoLoaderCase(TestCase):
def setUp(self):
super(PhotoLoaderCase, self).setUp()
def test_it_loads_the_photo_for_an_existing_candidate(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
jano = Candidate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
self.assertEquals(jano.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
self.assertEquals(otro.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')
def test_if_the_candidate_does_not_exist_it_does_it_for_the_rest(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
jano = Candidate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
<|fim▁hole|> self.assertEquals(jano.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
self.assertEquals(otro.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')
def test_it_prepends_url_when_provided(self):
call_command('photo_loader', 'elections/tests/fixtures/candidate_photo.csv', 'some.site/static/', verbosity=0)
jano = Candidate.objects.get(name=u"Alejandro Guillier")
otro = Candidate.objects.get(name=u"Manuel Rojas")
self.assertEquals(jano.photo, 'some.site/static/Alejandro_Guillier.jpg')
self.assertEquals(otro.photo, 'some.site/static/feo1.jpg')<|fim▁end|> | |
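Editor's note: the `photo_loader` management command itself is not part of this sample, so the sketch below is only a guess at the behaviour the assertions pin down — read a two-column CSV of candidate name and photo, optionally prepend a base URL, and skip names that do not resolve so the rest still load. Function and variable names here are hypothetical.

```python
import csv


def load_photos(csv_path, candidates, base_url=""):
    # candidates: mapping of name -> dict-like record, a stand-in for the Candidate model.
    with open(csv_path) as handle:
        for name, photo in csv.reader(handle):
            record = candidates.get(name)
            if record is None:  # unknown candidate: skip, keep loading the rest
                continue
            record["photo"] = base_url + photo


# Hypothetical call mirroring the second test:
# load_photos("candidate_photo.csv", people, base_url="some.site/static/")
```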
<|file_name|>helper.js<|end_file_name|><|fim▁begin|>var is_touch;
function is_touch_device() {
return 'ontouchstart' in window // works on most browsers
|| navigator.maxTouchPoints; // works on IE10/11 and Surface
}
<|fim▁hole|>})();<|fim▁end|> | (function() {
is_touch = is_touch_device(); |
<|file_name|>lock.py<|end_file_name|><|fim▁begin|>"""holds locking functionality that works across processes"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
import py
from filelock import FileLock, Timeout
from tox.reporter import verbosity1
@contextmanager
def hold_lock(lock_file, reporter=verbosity1):
py.path.local(lock_file.dirname).ensure(dir=1)
lock = FileLock(str(lock_file))
try:
try:
lock.acquire(0.0001)
except Timeout:
reporter("lock file {} present, will block until released".format(lock_file))
lock.acquire()
yield
finally:
lock.release(force=True)
def get_unique_file(path, prefix, suffix):
"""get a unique file in a folder having a given prefix and suffix,
with unique number in between"""
lock_file = path.join(".lock")
prefix = "{}-".format(prefix)
with hold_lock(lock_file):
max_value = -1
for candidate in path.listdir("{}*{}".format(prefix, suffix)):
try:
max_value = max(max_value, int(candidate.basename[len(prefix) : -len(suffix)]))
except ValueError:<|fim▁hole|> return winner<|fim▁end|> | continue
winner = path.join("{}{}{}".format(prefix, max_value + 1, suffix))
winner.ensure(dir=0) |
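Editor's note: a short usage sketch for the tox helpers above, assuming the module lives at `tox.util.lock` as in tox 3 and that a `py.path.local` directory is available; the paths are invented. `hold_lock` serialises concurrent processes on a `.lock` file, and `get_unique_file` hands back the next free numbered path under that lock.

```python
import py

from tox.util.lock import get_unique_file, hold_lock  # assumed module path

work_dir = py.path.local("/tmp/tox-demo").ensure(dir=1)

with hold_lock(work_dir.join(".setup.lock")):
    pass  # critical section shared across processes

log_file = get_unique_file(work_dir, prefix="commands", suffix=".log")
print(log_file)  # e.g. /tmp/tox-demo/commands-0.log, then commands-1.log, ...
```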
<|file_name|>GrantType.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2016 Yu Sheng. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, without warranties or
* conditions of any kind, EITHER EXPRESS OR IMPLIED. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.ysheng.auth.model.api;
/**
* Defines the grant types.
*/
public enum GrantType {
AUTHORIZATION_CODE,
IMPLICIT,<|fim▁hole|><|fim▁end|> | PASSWORD,
CLIENT_CREDENTIALS
} |